From bb12c1fd00eb51118749bbbc69c5596835fcbd3b Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 4 May 2024 19:31:02 +0200 Subject: Adding upstream version 5:7.0.15. Signed-off-by: Daniel Baumann --- deps/Makefile | 111 + deps/README.md | 107 + deps/hdr_histogram/COPYING.txt | 121 + deps/hdr_histogram/LICENSE.txt | 41 + deps/hdr_histogram/Makefile | 27 + deps/hdr_histogram/README.md | 78 + deps/hdr_histogram/hdr_atomic.h | 146 + deps/hdr_histogram/hdr_histogram.c | 1229 ++ deps/hdr_histogram/hdr_histogram.h | 521 + deps/hdr_histogram/hdr_redis_malloc.h | 13 + deps/hdr_histogram/hdr_tests.h | 22 + deps/hiredis/.github/workflows/build.yml | 205 + deps/hiredis/.gitignore | 9 + deps/hiredis/.travis.yml | 125 + deps/hiredis/CHANGELOG.md | 364 + deps/hiredis/CMakeLists.txt | 245 + deps/hiredis/COPYING | 29 + deps/hiredis/Makefile | 338 + deps/hiredis/README.md | 671 + deps/hiredis/adapters/ae.h | 130 + deps/hiredis/adapters/glib.h | 156 + deps/hiredis/adapters/ivykis.h | 84 + deps/hiredis/adapters/libev.h | 188 + deps/hiredis/adapters/libevent.h | 175 + deps/hiredis/adapters/libuv.h | 163 + deps/hiredis/adapters/macosx.h | 115 + deps/hiredis/adapters/qt.h | 135 + deps/hiredis/alloc.c | 90 + deps/hiredis/alloc.h | 96 + deps/hiredis/appveyor.yml | 24 + deps/hiredis/async.c | 909 ++ deps/hiredis/async.h | 147 + deps/hiredis/async_private.h | 75 + deps/hiredis/dict.c | 343 + deps/hiredis/dict.h | 125 + deps/hiredis/examples/CMakeLists.txt | 49 + deps/hiredis/examples/example-ae.c | 62 + deps/hiredis/examples/example-glib.c | 73 + deps/hiredis/examples/example-ivykis.c | 60 + deps/hiredis/examples/example-libev.c | 54 + deps/hiredis/examples/example-libevent-ssl.c | 90 + deps/hiredis/examples/example-libevent.c | 67 + deps/hiredis/examples/example-libuv.c | 81 + deps/hiredis/examples/example-macosx.c | 66 + deps/hiredis/examples/example-push.c | 159 + deps/hiredis/examples/example-qt.cpp | 46 + deps/hiredis/examples/example-qt.h | 32 + deps/hiredis/examples/example-ssl.c | 113 + deps/hiredis/examples/example.c | 94 + deps/hiredis/fmacros.h | 14 + deps/hiredis/fuzzing/format_command_fuzzer.c | 57 + deps/hiredis/hiredis-config.cmake.in | 13 + deps/hiredis/hiredis.c | 1188 ++ deps/hiredis/hiredis.h | 348 + deps/hiredis/hiredis.pc.in | 12 + deps/hiredis/hiredis.targets | 11 + deps/hiredis/hiredis_ssl-config.cmake.in | 13 + deps/hiredis/hiredis_ssl.h | 129 + deps/hiredis/hiredis_ssl.pc.in | 12 + deps/hiredis/net.c | 612 + deps/hiredis/net.h | 56 + deps/hiredis/read.c | 785 ++ deps/hiredis/read.h | 129 + deps/hiredis/sds.c | 1289 ++ deps/hiredis/sds.h | 278 + deps/hiredis/sdsalloc.h | 44 + deps/hiredis/sdscompat.h | 94 + deps/hiredis/sockcompat.c | 248 + deps/hiredis/sockcompat.h | 92 + deps/hiredis/ssl.c | 569 + deps/hiredis/test.c | 2041 +++ deps/hiredis/test.sh | 78 + deps/hiredis/win32.h | 56 + deps/jemalloc/.appveyor.yml | 42 + deps/jemalloc/.autom4te.cfg | 3 + deps/jemalloc/.cirrus.yml | 21 + deps/jemalloc/.gitattributes | 1 + deps/jemalloc/.gitignore | 92 + deps/jemalloc/.travis.yml | 195 + deps/jemalloc/COPYING | 27 + deps/jemalloc/ChangeLog | 1518 ++ deps/jemalloc/INSTALL.md | 421 + deps/jemalloc/Makefile.in | 623 + deps/jemalloc/README | 20 + deps/jemalloc/TUNING.md | 129 + deps/jemalloc/VERSION | 1 + deps/jemalloc/autogen.sh | 17 + deps/jemalloc/bin/jemalloc-config.in | 83 + deps/jemalloc/bin/jemalloc.sh.in | 9 + deps/jemalloc/bin/jeprof.in | 5625 ++++++++ deps/jemalloc/build-aux/config.guess | 1462 ++ deps/jemalloc/build-aux/config.sub | 1825 +++ deps/jemalloc/build-aux/install-sh | 250 + 
deps/jemalloc/config.stamp.in | 0 deps/jemalloc/configure | 14112 +++++++++++++++++++ deps/jemalloc/configure.ac | 2406 ++++ deps/jemalloc/doc/html.xsl.in | 5 + deps/jemalloc/doc/jemalloc.xml.in | 3513 +++++ deps/jemalloc/doc/manpages.xsl.in | 4 + deps/jemalloc/doc/stylesheet.xsl | 10 + .../include/jemalloc/internal/arena_externs.h | 104 + .../include/jemalloc/internal/arena_inlines_a.h | 57 + .../include/jemalloc/internal/arena_inlines_b.h | 427 + .../include/jemalloc/internal/arena_stats.h | 271 + .../include/jemalloc/internal/arena_structs_a.h | 11 + .../include/jemalloc/internal/arena_structs_b.h | 232 + .../include/jemalloc/internal/arena_types.h | 51 + deps/jemalloc/include/jemalloc/internal/assert.h | 56 + deps/jemalloc/include/jemalloc/internal/atomic.h | 86 + .../include/jemalloc/internal/atomic_c11.h | 97 + .../include/jemalloc/internal/atomic_gcc_atomic.h | 129 + .../include/jemalloc/internal/atomic_gcc_sync.h | 195 + .../include/jemalloc/internal/atomic_msvc.h | 158 + .../jemalloc/internal/background_thread_externs.h | 32 + .../jemalloc/internal/background_thread_inlines.h | 62 + .../jemalloc/internal/background_thread_structs.h | 54 + .../include/jemalloc/internal/base_externs.h | 22 + .../include/jemalloc/internal/base_inlines.h | 13 + .../include/jemalloc/internal/base_structs.h | 59 + .../include/jemalloc/internal/base_types.h | 33 + deps/jemalloc/include/jemalloc/internal/bin.h | 123 + .../jemalloc/include/jemalloc/internal/bin_stats.h | 54 + .../jemalloc/include/jemalloc/internal/bin_types.h | 17 + deps/jemalloc/include/jemalloc/internal/bit_util.h | 239 + deps/jemalloc/include/jemalloc/internal/bitmap.h | 369 + .../jemalloc/include/jemalloc/internal/cache_bin.h | 131 + deps/jemalloc/include/jemalloc/internal/ckh.h | 101 + deps/jemalloc/include/jemalloc/internal/ctl.h | 134 + deps/jemalloc/include/jemalloc/internal/div.h | 41 + deps/jemalloc/include/jemalloc/internal/emitter.h | 486 + .../include/jemalloc/internal/extent_dss.h | 26 + .../include/jemalloc/internal/extent_externs.h | 83 + .../include/jemalloc/internal/extent_inlines.h | 501 + .../include/jemalloc/internal/extent_mmap.h | 10 + .../include/jemalloc/internal/extent_structs.h | 256 + .../include/jemalloc/internal/extent_types.h | 23 + deps/jemalloc/include/jemalloc/internal/hash.h | 319 + deps/jemalloc/include/jemalloc/internal/hook.h | 163 + .../jemalloc/internal/jemalloc_internal_decls.h | 94 + .../jemalloc/internal/jemalloc_internal_defs.h.in | 366 + .../jemalloc/internal/jemalloc_internal_externs.h | 57 + .../jemalloc/internal/jemalloc_internal_includes.h | 94 + .../internal/jemalloc_internal_inlines_a.h | 174 + .../internal/jemalloc_internal_inlines_b.h | 87 + .../internal/jemalloc_internal_inlines_c.h | 273 + .../jemalloc/internal/jemalloc_internal_macros.h | 114 + .../jemalloc/internal/jemalloc_internal_types.h | 114 + .../jemalloc/internal/jemalloc_preamble.h.in | 213 + .../include/jemalloc/internal/large_externs.h | 32 + deps/jemalloc/include/jemalloc/internal/log.h | 115 + .../jemalloc/include/jemalloc/internal/malloc_io.h | 102 + deps/jemalloc/include/jemalloc/internal/mutex.h | 288 + .../include/jemalloc/internal/mutex_pool.h | 94 + .../include/jemalloc/internal/mutex_prof.h | 108 + deps/jemalloc/include/jemalloc/internal/nstime.h | 34 + deps/jemalloc/include/jemalloc/internal/pages.h | 88 + deps/jemalloc/include/jemalloc/internal/ph.h | 391 + .../include/jemalloc/internal/private_namespace.sh | 5 + .../include/jemalloc/internal/private_symbols.sh | 51 + 
deps/jemalloc/include/jemalloc/internal/prng.h | 185 + .../include/jemalloc/internal/prof_externs.h | 105 + .../include/jemalloc/internal/prof_inlines_a.h | 85 + .../include/jemalloc/internal/prof_inlines_b.h | 250 + .../include/jemalloc/internal/prof_structs.h | 200 + .../include/jemalloc/internal/prof_types.h | 56 + .../include/jemalloc/internal/public_namespace.sh | 6 + .../jemalloc/internal/public_unnamespace.sh | 6 + deps/jemalloc/include/jemalloc/internal/ql.h | 88 + deps/jemalloc/include/jemalloc/internal/qr.h | 72 + deps/jemalloc/include/jemalloc/internal/quantum.h | 77 + deps/jemalloc/include/jemalloc/internal/rb.h | 1006 ++ deps/jemalloc/include/jemalloc/internal/rtree.h | 528 + .../jemalloc/include/jemalloc/internal/rtree_tsd.h | 50 + .../include/jemalloc/internal/safety_check.h | 26 + deps/jemalloc/include/jemalloc/internal/sc.h | 333 + deps/jemalloc/include/jemalloc/internal/seq.h | 55 + .../include/jemalloc/internal/smoothstep.h | 232 + .../include/jemalloc/internal/smoothstep.sh | 101 + deps/jemalloc/include/jemalloc/internal/spin.h | 40 + deps/jemalloc/include/jemalloc/internal/stats.h | 31 + deps/jemalloc/include/jemalloc/internal/sz.h | 318 + .../include/jemalloc/internal/tcache_externs.h | 53 + .../include/jemalloc/internal/tcache_inlines.h | 227 + .../include/jemalloc/internal/tcache_structs.h | 70 + .../include/jemalloc/internal/tcache_types.h | 59 + .../include/jemalloc/internal/test_hooks.h | 19 + deps/jemalloc/include/jemalloc/internal/ticker.h | 91 + deps/jemalloc/include/jemalloc/internal/tsd.h | 415 + .../include/jemalloc/internal/tsd_generic.h | 163 + .../jemalloc/internal/tsd_malloc_thread_cleanup.h | 61 + deps/jemalloc/include/jemalloc/internal/tsd_tls.h | 60 + .../jemalloc/include/jemalloc/internal/tsd_types.h | 10 + deps/jemalloc/include/jemalloc/internal/tsd_win.h | 139 + deps/jemalloc/include/jemalloc/internal/util.h | 67 + deps/jemalloc/include/jemalloc/internal/witness.h | 347 + deps/jemalloc/include/jemalloc/jemalloc.sh | 27 + deps/jemalloc/include/jemalloc/jemalloc_defs.h.in | 48 + .../jemalloc/include/jemalloc/jemalloc_macros.h.in | 133 + deps/jemalloc/include/jemalloc/jemalloc_mangle.sh | 45 + .../jemalloc/include/jemalloc/jemalloc_protos.h.in | 66 + deps/jemalloc/include/jemalloc/jemalloc_rename.sh | 22 + .../include/jemalloc/jemalloc_typedefs.h.in | 77 + deps/jemalloc/include/msvc_compat/C99/stdbool.h | 20 + deps/jemalloc/include/msvc_compat/C99/stdint.h | 247 + deps/jemalloc/include/msvc_compat/strings.h | 58 + deps/jemalloc/include/msvc_compat/windows_extra.h | 6 + deps/jemalloc/jemalloc.pc.in | 12 + deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 | 562 + deps/jemalloc/msvc/ReadMe.txt | 23 + deps/jemalloc/msvc/jemalloc_vc2015.sln | 63 + deps/jemalloc/msvc/jemalloc_vc2017.sln | 63 + .../msvc/projects/vc2015/jemalloc/jemalloc.vcxproj | 350 + .../vc2015/jemalloc/jemalloc.vcxproj.filters | 107 + .../vc2015/test_threads/test_threads.vcxproj | 327 + .../test_threads/test_threads.vcxproj.filters | 26 + .../msvc/projects/vc2017/jemalloc/jemalloc.vcxproj | 350 + .../vc2017/jemalloc/jemalloc.vcxproj.filters | 110 + .../vc2017/test_threads/test_threads.vcxproj | 326 + .../test_threads/test_threads.vcxproj.filters | 26 + deps/jemalloc/msvc/test_threads/test_threads.cpp | 88 + deps/jemalloc/msvc/test_threads/test_threads.h | 3 + .../msvc/test_threads/test_threads_main.cpp | 11 + deps/jemalloc/run_tests.sh | 1 + deps/jemalloc/scripts/gen_run_tests.py | 126 + deps/jemalloc/scripts/gen_travis.py | 149 + deps/jemalloc/src/arena.c | 2298 +++ 
deps/jemalloc/src/background_thread.c | 939 ++ deps/jemalloc/src/base.c | 514 + deps/jemalloc/src/bin.c | 95 + deps/jemalloc/src/bitmap.c | 121 + deps/jemalloc/src/ckh.c | 570 + deps/jemalloc/src/ctl.c | 3435 +++++ deps/jemalloc/src/div.c | 55 + deps/jemalloc/src/extent.c | 2403 ++++ deps/jemalloc/src/extent_dss.c | 271 + deps/jemalloc/src/extent_mmap.c | 42 + deps/jemalloc/src/hash.c | 3 + deps/jemalloc/src/hook.c | 195 + deps/jemalloc/src/jemalloc.c | 3931 ++++++ deps/jemalloc/src/jemalloc_cpp.cpp | 141 + deps/jemalloc/src/large.c | 395 + deps/jemalloc/src/log.c | 78 + deps/jemalloc/src/malloc_io.c | 675 + deps/jemalloc/src/mutex.c | 223 + deps/jemalloc/src/mutex_pool.c | 18 + deps/jemalloc/src/nstime.c | 170 + deps/jemalloc/src/pages.c | 649 + deps/jemalloc/src/prng.c | 3 + deps/jemalloc/src/prof.c | 3160 +++++ deps/jemalloc/src/rtree.c | 320 + deps/jemalloc/src/safety_check.c | 24 + deps/jemalloc/src/sc.c | 313 + deps/jemalloc/src/stats.c | 1457 ++ deps/jemalloc/src/sz.c | 64 + deps/jemalloc/src/tcache.c | 798 ++ deps/jemalloc/src/test_hooks.c | 12 + deps/jemalloc/src/ticker.c | 3 + deps/jemalloc/src/tsd.c | 534 + deps/jemalloc/src/witness.c | 100 + deps/jemalloc/src/zone.c | 469 + deps/jemalloc/test/include/test/SFMT-alti.h | 186 + deps/jemalloc/test/include/test/SFMT-params.h | 132 + deps/jemalloc/test/include/test/SFMT-params11213.h | 81 + deps/jemalloc/test/include/test/SFMT-params1279.h | 81 + .../jemalloc/test/include/test/SFMT-params132049.h | 81 + deps/jemalloc/test/include/test/SFMT-params19937.h | 81 + .../jemalloc/test/include/test/SFMT-params216091.h | 81 + deps/jemalloc/test/include/test/SFMT-params2281.h | 81 + deps/jemalloc/test/include/test/SFMT-params4253.h | 81 + deps/jemalloc/test/include/test/SFMT-params44497.h | 81 + deps/jemalloc/test/include/test/SFMT-params607.h | 81 + deps/jemalloc/test/include/test/SFMT-params86243.h | 81 + deps/jemalloc/test/include/test/SFMT-sse2.h | 157 + deps/jemalloc/test/include/test/SFMT.h | 146 + deps/jemalloc/test/include/test/btalloc.h | 30 + deps/jemalloc/test/include/test/extent_hooks.h | 289 + deps/jemalloc/test/include/test/jemalloc_test.h.in | 173 + .../test/include/test/jemalloc_test_defs.h.in | 9 + deps/jemalloc/test/include/test/math.h | 306 + deps/jemalloc/test/include/test/mq.h | 107 + deps/jemalloc/test/include/test/mtx.h | 21 + deps/jemalloc/test/include/test/test.h | 338 + deps/jemalloc/test/include/test/thd.h | 9 + deps/jemalloc/test/include/test/timer.h | 11 + deps/jemalloc/test/integration/MALLOCX_ARENA.c | 66 + deps/jemalloc/test/integration/aligned_alloc.c | 157 + deps/jemalloc/test/integration/allocated.c | 124 + deps/jemalloc/test/integration/cpp/basic.cpp | 25 + deps/jemalloc/test/integration/extent.c | 248 + deps/jemalloc/test/integration/extent.sh | 5 + deps/jemalloc/test/integration/malloc.c | 16 + deps/jemalloc/test/integration/mallocx.c | 274 + deps/jemalloc/test/integration/mallocx.sh | 5 + deps/jemalloc/test/integration/overflow.c | 59 + deps/jemalloc/test/integration/posix_memalign.c | 128 + deps/jemalloc/test/integration/rallocx.c | 258 + deps/jemalloc/test/integration/sdallocx.c | 55 + deps/jemalloc/test/integration/slab_sizes.c | 80 + deps/jemalloc/test/integration/slab_sizes.sh | 4 + deps/jemalloc/test/integration/smallocx.c | 312 + deps/jemalloc/test/integration/smallocx.sh | 5 + deps/jemalloc/test/integration/thread_arena.c | 86 + .../test/integration/thread_tcache_enabled.c | 87 + deps/jemalloc/test/integration/xallocx.c | 384 + deps/jemalloc/test/integration/xallocx.sh | 5 + 
deps/jemalloc/test/src/SFMT.c | 719 + deps/jemalloc/test/src/btalloc.c | 6 + deps/jemalloc/test/src/btalloc_0.c | 3 + deps/jemalloc/test/src/btalloc_1.c | 3 + deps/jemalloc/test/src/math.c | 2 + deps/jemalloc/test/src/mq.c | 27 + deps/jemalloc/test/src/mtx.c | 61 + deps/jemalloc/test/src/test.c | 234 + deps/jemalloc/test/src/thd.c | 34 + deps/jemalloc/test/src/timer.c | 56 + deps/jemalloc/test/stress/hookbench.c | 73 + deps/jemalloc/test/stress/microbench.c | 165 + deps/jemalloc/test/test.sh.in | 80 + deps/jemalloc/test/unit/SFMT.c | 1599 +++ deps/jemalloc/test/unit/a0.c | 16 + deps/jemalloc/test/unit/arena_reset.c | 349 + deps/jemalloc/test/unit/arena_reset_prof.c | 4 + deps/jemalloc/test/unit/arena_reset_prof.sh | 3 + deps/jemalloc/test/unit/atomic.c | 229 + deps/jemalloc/test/unit/background_thread.c | 119 + deps/jemalloc/test/unit/background_thread_enable.c | 85 + deps/jemalloc/test/unit/base.c | 234 + deps/jemalloc/test/unit/binshard.c | 154 + deps/jemalloc/test/unit/binshard.sh | 3 + deps/jemalloc/test/unit/bit_util.c | 111 + deps/jemalloc/test/unit/bitmap.c | 431 + deps/jemalloc/test/unit/ckh.c | 211 + deps/jemalloc/test/unit/decay.c | 605 + deps/jemalloc/test/unit/decay.sh | 3 + deps/jemalloc/test/unit/div.c | 29 + deps/jemalloc/test/unit/emitter.c | 469 + deps/jemalloc/test/unit/extent_quantize.c | 141 + deps/jemalloc/test/unit/extent_util.c | 269 + deps/jemalloc/test/unit/fork.c | 141 + deps/jemalloc/test/unit/hash.c | 173 + deps/jemalloc/test/unit/hook.c | 580 + deps/jemalloc/test/unit/huge.c | 108 + deps/jemalloc/test/unit/junk.c | 141 + deps/jemalloc/test/unit/junk.sh | 5 + deps/jemalloc/test/unit/junk_alloc.c | 1 + deps/jemalloc/test/unit/junk_alloc.sh | 5 + deps/jemalloc/test/unit/junk_free.c | 1 + deps/jemalloc/test/unit/junk_free.sh | 5 + deps/jemalloc/test/unit/log.c | 193 + deps/jemalloc/test/unit/mallctl.c | 888 ++ deps/jemalloc/test/unit/malloc_io.c | 258 + deps/jemalloc/test/unit/math.c | 390 + deps/jemalloc/test/unit/mq.c | 89 + deps/jemalloc/test/unit/mtx.c | 57 + deps/jemalloc/test/unit/nstime.c | 249 + deps/jemalloc/test/unit/pack.c | 166 + deps/jemalloc/test/unit/pack.sh | 4 + deps/jemalloc/test/unit/pages.c | 29 + deps/jemalloc/test/unit/ph.c | 318 + deps/jemalloc/test/unit/prng.c | 237 + deps/jemalloc/test/unit/prof_accum.c | 81 + deps/jemalloc/test/unit/prof_accum.sh | 5 + deps/jemalloc/test/unit/prof_active.c | 117 + deps/jemalloc/test/unit/prof_active.sh | 5 + deps/jemalloc/test/unit/prof_gdump.c | 74 + deps/jemalloc/test/unit/prof_gdump.sh | 6 + deps/jemalloc/test/unit/prof_idump.c | 42 + deps/jemalloc/test/unit/prof_idump.sh | 8 + deps/jemalloc/test/unit/prof_log.c | 148 + deps/jemalloc/test/unit/prof_log.sh | 5 + deps/jemalloc/test/unit/prof_reset.c | 286 + deps/jemalloc/test/unit/prof_reset.sh | 5 + deps/jemalloc/test/unit/prof_tctx.c | 46 + deps/jemalloc/test/unit/prof_tctx.sh | 5 + deps/jemalloc/test/unit/prof_thread_name.c | 120 + deps/jemalloc/test/unit/prof_thread_name.sh | 5 + deps/jemalloc/test/unit/ql.c | 204 + deps/jemalloc/test/unit/qr.c | 243 + deps/jemalloc/test/unit/rb.c | 355 + deps/jemalloc/test/unit/retained.c | 184 + deps/jemalloc/test/unit/rtree.c | 228 + deps/jemalloc/test/unit/safety_check.c | 156 + deps/jemalloc/test/unit/safety_check.sh | 5 + deps/jemalloc/test/unit/sc.c | 33 + deps/jemalloc/test/unit/seq.c | 95 + deps/jemalloc/test/unit/size_classes.c | 188 + deps/jemalloc/test/unit/slab.c | 33 + deps/jemalloc/test/unit/smoothstep.c | 102 + deps/jemalloc/test/unit/spin.c | 18 + deps/jemalloc/test/unit/stats.c | 374 + 
deps/jemalloc/test/unit/stats_print.c | 999 ++ deps/jemalloc/test/unit/test_hooks.c | 38 + deps/jemalloc/test/unit/ticker.c | 73 + deps/jemalloc/test/unit/tsd.c | 267 + deps/jemalloc/test/unit/witness.c | 280 + deps/jemalloc/test/unit/zero.c | 59 + deps/jemalloc/test/unit/zero.sh | 5 + deps/linenoise/.gitignore | 3 + deps/linenoise/Makefile | 21 + deps/linenoise/README.markdown | 247 + deps/linenoise/example.c | 79 + deps/linenoise/linenoise.c | 1227 ++ deps/linenoise/linenoise.h | 75 + deps/lua/COPYRIGHT | 34 + deps/lua/HISTORY | 183 + deps/lua/INSTALL | 99 + deps/lua/Makefile | 128 + deps/lua/README | 37 + deps/lua/doc/contents.html | 497 + deps/lua/doc/cover.png | Bin 0 -> 3305 bytes deps/lua/doc/logo.gif | Bin 0 -> 4232 bytes deps/lua/doc/lua.1 | 163 + deps/lua/doc/lua.css | 83 + deps/lua/doc/lua.html | 172 + deps/lua/doc/luac.1 | 136 + deps/lua/doc/luac.html | 145 + deps/lua/doc/manual.css | 24 + deps/lua/doc/manual.html | 8804 ++++++++++++ deps/lua/doc/readme.html | 40 + deps/lua/etc/Makefile | 44 + deps/lua/etc/README | 37 + deps/lua/etc/all.c | 38 + deps/lua/etc/lua.hpp | 9 + deps/lua/etc/lua.ico | Bin 0 -> 1078 bytes deps/lua/etc/lua.pc | 31 + deps/lua/etc/luavs.bat | 28 + deps/lua/etc/min.c | 39 + deps/lua/etc/noparser.c | 50 + deps/lua/etc/strict.lua | 41 + deps/lua/src/Makefile | 183 + deps/lua/src/fpconv.c | 205 + deps/lua/src/fpconv.h | 22 + deps/lua/src/lapi.c | 1109 ++ deps/lua/src/lapi.h | 16 + deps/lua/src/lauxlib.c | 652 + deps/lua/src/lauxlib.h | 174 + deps/lua/src/lbaselib.c | 653 + deps/lua/src/lcode.c | 831 ++ deps/lua/src/lcode.h | 76 + deps/lua/src/ldblib.c | 398 + deps/lua/src/ldebug.c | 637 + deps/lua/src/ldebug.h | 33 + deps/lua/src/ldo.c | 519 + deps/lua/src/ldo.h | 57 + deps/lua/src/ldump.c | 164 + deps/lua/src/lfunc.c | 174 + deps/lua/src/lfunc.h | 34 + deps/lua/src/lgc.c | 710 + deps/lua/src/lgc.h | 110 + deps/lua/src/linit.c | 38 + deps/lua/src/liolib.c | 556 + deps/lua/src/llex.c | 463 + deps/lua/src/llex.h | 81 + deps/lua/src/llimits.h | 128 + deps/lua/src/lmathlib.c | 263 + deps/lua/src/lmem.c | 86 + deps/lua/src/lmem.h | 49 + deps/lua/src/loadlib.c | 666 + deps/lua/src/lobject.c | 214 + deps/lua/src/lobject.h | 382 + deps/lua/src/lopcodes.c | 102 + deps/lua/src/lopcodes.h | 268 + deps/lua/src/loslib.c | 243 + deps/lua/src/lparser.c | 1339 ++ deps/lua/src/lparser.h | 82 + deps/lua/src/lstate.c | 214 + deps/lua/src/lstate.h | 169 + deps/lua/src/lstring.c | 111 + deps/lua/src/lstring.h | 31 + deps/lua/src/lstrlib.c | 871 ++ deps/lua/src/ltable.c | 589 + deps/lua/src/ltable.h | 40 + deps/lua/src/ltablib.c | 287 + deps/lua/src/ltm.c | 75 + deps/lua/src/ltm.h | 54 + deps/lua/src/lua.c | 392 + deps/lua/src/lua.h | 391 + deps/lua/src/lua_bit.c | 190 + deps/lua/src/lua_cjson.c | 1430 ++ deps/lua/src/lua_cmsgpack.c | 981 ++ deps/lua/src/lua_struct.c | 429 + deps/lua/src/luac.c | 200 + deps/lua/src/luaconf.h | 763 + deps/lua/src/lualib.h | 53 + deps/lua/src/lundump.c | 227 + deps/lua/src/lundump.h | 36 + deps/lua/src/lvm.c | 769 + deps/lua/src/lvm.h | 36 + deps/lua/src/lzio.c | 82 + deps/lua/src/lzio.h | 67 + deps/lua/src/print.c | 227 + deps/lua/src/strbuf.c | 198 + deps/lua/src/strbuf.h | 146 + deps/lua/test/README | 26 + deps/lua/test/bisect.lua | 27 + deps/lua/test/cf.lua | 16 + deps/lua/test/echo.lua | 5 + deps/lua/test/env.lua | 7 + deps/lua/test/factorial.lua | 32 + deps/lua/test/fib.lua | 40 + deps/lua/test/fibfor.lua | 13 + deps/lua/test/globals.lua | 13 + deps/lua/test/hello.lua | 3 + deps/lua/test/life.lua | 111 + deps/lua/test/luac.lua | 7 + 
deps/lua/test/printf.lua | 7 + deps/lua/test/readonly.lua | 12 + deps/lua/test/sieve.lua | 29 + deps/lua/test/sort.lua | 66 + deps/lua/test/table.lua | 12 + deps/lua/test/trace-calls.lua | 32 + deps/lua/test/trace-globals.lua | 38 + deps/lua/test/xd.lua | 14 + 513 files changed, 145053 insertions(+) create mode 100644 deps/Makefile create mode 100644 deps/README.md create mode 100644 deps/hdr_histogram/COPYING.txt create mode 100644 deps/hdr_histogram/LICENSE.txt create mode 100644 deps/hdr_histogram/Makefile create mode 100644 deps/hdr_histogram/README.md create mode 100644 deps/hdr_histogram/hdr_atomic.h create mode 100644 deps/hdr_histogram/hdr_histogram.c create mode 100644 deps/hdr_histogram/hdr_histogram.h create mode 100644 deps/hdr_histogram/hdr_redis_malloc.h create mode 100644 deps/hdr_histogram/hdr_tests.h create mode 100644 deps/hiredis/.github/workflows/build.yml create mode 100644 deps/hiredis/.gitignore create mode 100644 deps/hiredis/.travis.yml create mode 100644 deps/hiredis/CHANGELOG.md create mode 100644 deps/hiredis/CMakeLists.txt create mode 100644 deps/hiredis/COPYING create mode 100644 deps/hiredis/Makefile create mode 100644 deps/hiredis/README.md create mode 100644 deps/hiredis/adapters/ae.h create mode 100644 deps/hiredis/adapters/glib.h create mode 100644 deps/hiredis/adapters/ivykis.h create mode 100644 deps/hiredis/adapters/libev.h create mode 100644 deps/hiredis/adapters/libevent.h create mode 100644 deps/hiredis/adapters/libuv.h create mode 100644 deps/hiredis/adapters/macosx.h create mode 100644 deps/hiredis/adapters/qt.h create mode 100644 deps/hiredis/alloc.c create mode 100644 deps/hiredis/alloc.h create mode 100644 deps/hiredis/appveyor.yml create mode 100644 deps/hiredis/async.c create mode 100644 deps/hiredis/async.h create mode 100644 deps/hiredis/async_private.h create mode 100644 deps/hiredis/dict.c create mode 100644 deps/hiredis/dict.h create mode 100644 deps/hiredis/examples/CMakeLists.txt create mode 100644 deps/hiredis/examples/example-ae.c create mode 100644 deps/hiredis/examples/example-glib.c create mode 100644 deps/hiredis/examples/example-ivykis.c create mode 100644 deps/hiredis/examples/example-libev.c create mode 100644 deps/hiredis/examples/example-libevent-ssl.c create mode 100644 deps/hiredis/examples/example-libevent.c create mode 100644 deps/hiredis/examples/example-libuv.c create mode 100644 deps/hiredis/examples/example-macosx.c create mode 100644 deps/hiredis/examples/example-push.c create mode 100644 deps/hiredis/examples/example-qt.cpp create mode 100644 deps/hiredis/examples/example-qt.h create mode 100644 deps/hiredis/examples/example-ssl.c create mode 100644 deps/hiredis/examples/example.c create mode 100644 deps/hiredis/fmacros.h create mode 100644 deps/hiredis/fuzzing/format_command_fuzzer.c create mode 100644 deps/hiredis/hiredis-config.cmake.in create mode 100644 deps/hiredis/hiredis.c create mode 100644 deps/hiredis/hiredis.h create mode 100644 deps/hiredis/hiredis.pc.in create mode 100644 deps/hiredis/hiredis.targets create mode 100644 deps/hiredis/hiredis_ssl-config.cmake.in create mode 100644 deps/hiredis/hiredis_ssl.h create mode 100644 deps/hiredis/hiredis_ssl.pc.in create mode 100644 deps/hiredis/net.c create mode 100644 deps/hiredis/net.h create mode 100644 deps/hiredis/read.c create mode 100644 deps/hiredis/read.h create mode 100644 deps/hiredis/sds.c create mode 100644 deps/hiredis/sds.h create mode 100644 deps/hiredis/sdsalloc.h create mode 100644 deps/hiredis/sdscompat.h create mode 100644 
deps/hiredis/sockcompat.c create mode 100644 deps/hiredis/sockcompat.h create mode 100644 deps/hiredis/ssl.c create mode 100644 deps/hiredis/test.c create mode 100755 deps/hiredis/test.sh create mode 100644 deps/hiredis/win32.h create mode 100644 deps/jemalloc/.appveyor.yml create mode 100644 deps/jemalloc/.autom4te.cfg create mode 100644 deps/jemalloc/.cirrus.yml create mode 100644 deps/jemalloc/.gitattributes create mode 100644 deps/jemalloc/.gitignore create mode 100644 deps/jemalloc/.travis.yml create mode 100644 deps/jemalloc/COPYING create mode 100644 deps/jemalloc/ChangeLog create mode 100644 deps/jemalloc/INSTALL.md create mode 100644 deps/jemalloc/Makefile.in create mode 100644 deps/jemalloc/README create mode 100644 deps/jemalloc/TUNING.md create mode 100644 deps/jemalloc/VERSION create mode 100755 deps/jemalloc/autogen.sh create mode 100644 deps/jemalloc/bin/jemalloc-config.in create mode 100644 deps/jemalloc/bin/jemalloc.sh.in create mode 100644 deps/jemalloc/bin/jeprof.in create mode 100755 deps/jemalloc/build-aux/config.guess create mode 100755 deps/jemalloc/build-aux/config.sub create mode 100755 deps/jemalloc/build-aux/install-sh create mode 100644 deps/jemalloc/config.stamp.in create mode 100755 deps/jemalloc/configure create mode 100644 deps/jemalloc/configure.ac create mode 100644 deps/jemalloc/doc/html.xsl.in create mode 100644 deps/jemalloc/doc/jemalloc.xml.in create mode 100644 deps/jemalloc/doc/manpages.xsl.in create mode 100644 deps/jemalloc/doc/stylesheet.xsl create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_stats.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_structs_a.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_structs_b.h create mode 100644 deps/jemalloc/include/jemalloc/internal/arena_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/assert.h create mode 100644 deps/jemalloc/include/jemalloc/internal/atomic.h create mode 100644 deps/jemalloc/include/jemalloc/internal/atomic_c11.h create mode 100644 deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h create mode 100644 deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h create mode 100644 deps/jemalloc/include/jemalloc/internal/atomic_msvc.h create mode 100644 deps/jemalloc/include/jemalloc/internal/background_thread_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h create mode 100644 deps/jemalloc/include/jemalloc/internal/background_thread_structs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/base_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/base_inlines.h create mode 100644 deps/jemalloc/include/jemalloc/internal/base_structs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/base_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/bin.h create mode 100644 deps/jemalloc/include/jemalloc/internal/bin_stats.h create mode 100644 deps/jemalloc/include/jemalloc/internal/bin_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/bit_util.h create mode 100644 deps/jemalloc/include/jemalloc/internal/bitmap.h create mode 100644 deps/jemalloc/include/jemalloc/internal/cache_bin.h create mode 100644 deps/jemalloc/include/jemalloc/internal/ckh.h create mode 100644 
deps/jemalloc/include/jemalloc/internal/ctl.h create mode 100644 deps/jemalloc/include/jemalloc/internal/div.h create mode 100644 deps/jemalloc/include/jemalloc/internal/emitter.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_dss.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_inlines.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_mmap.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_structs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/extent_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/hash.h create mode 100644 deps/jemalloc/include/jemalloc/internal/hook.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in create mode 100644 deps/jemalloc/include/jemalloc/internal/large_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/log.h create mode 100644 deps/jemalloc/include/jemalloc/internal/malloc_io.h create mode 100644 deps/jemalloc/include/jemalloc/internal/mutex.h create mode 100644 deps/jemalloc/include/jemalloc/internal/mutex_pool.h create mode 100644 deps/jemalloc/include/jemalloc/internal/mutex_prof.h create mode 100644 deps/jemalloc/include/jemalloc/internal/nstime.h create mode 100644 deps/jemalloc/include/jemalloc/internal/pages.h create mode 100644 deps/jemalloc/include/jemalloc/internal/ph.h create mode 100755 deps/jemalloc/include/jemalloc/internal/private_namespace.sh create mode 100755 deps/jemalloc/include/jemalloc/internal/private_symbols.sh create mode 100644 deps/jemalloc/include/jemalloc/internal/prng.h create mode 100644 deps/jemalloc/include/jemalloc/internal/prof_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h create mode 100644 deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h create mode 100644 deps/jemalloc/include/jemalloc/internal/prof_structs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/prof_types.h create mode 100755 deps/jemalloc/include/jemalloc/internal/public_namespace.sh create mode 100755 deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh create mode 100644 deps/jemalloc/include/jemalloc/internal/ql.h create mode 100644 deps/jemalloc/include/jemalloc/internal/qr.h create mode 100644 deps/jemalloc/include/jemalloc/internal/quantum.h create mode 100644 deps/jemalloc/include/jemalloc/internal/rb.h create mode 100644 deps/jemalloc/include/jemalloc/internal/rtree.h create mode 100644 deps/jemalloc/include/jemalloc/internal/rtree_tsd.h create mode 100644 deps/jemalloc/include/jemalloc/internal/safety_check.h create mode 100644 deps/jemalloc/include/jemalloc/internal/sc.h create mode 
100644 deps/jemalloc/include/jemalloc/internal/seq.h create mode 100644 deps/jemalloc/include/jemalloc/internal/smoothstep.h create mode 100755 deps/jemalloc/include/jemalloc/internal/smoothstep.sh create mode 100644 deps/jemalloc/include/jemalloc/internal/spin.h create mode 100644 deps/jemalloc/include/jemalloc/internal/stats.h create mode 100644 deps/jemalloc/include/jemalloc/internal/sz.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tcache_externs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tcache_inlines.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tcache_structs.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tcache_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/test_hooks.h create mode 100644 deps/jemalloc/include/jemalloc/internal/ticker.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd_generic.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd_tls.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd_types.h create mode 100644 deps/jemalloc/include/jemalloc/internal/tsd_win.h create mode 100644 deps/jemalloc/include/jemalloc/internal/util.h create mode 100644 deps/jemalloc/include/jemalloc/internal/witness.h create mode 100755 deps/jemalloc/include/jemalloc/jemalloc.sh create mode 100644 deps/jemalloc/include/jemalloc/jemalloc_defs.h.in create mode 100644 deps/jemalloc/include/jemalloc/jemalloc_macros.h.in create mode 100755 deps/jemalloc/include/jemalloc/jemalloc_mangle.sh create mode 100644 deps/jemalloc/include/jemalloc/jemalloc_protos.h.in create mode 100755 deps/jemalloc/include/jemalloc/jemalloc_rename.sh create mode 100644 deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in create mode 100644 deps/jemalloc/include/msvc_compat/C99/stdbool.h create mode 100644 deps/jemalloc/include/msvc_compat/C99/stdint.h create mode 100644 deps/jemalloc/include/msvc_compat/strings.h create mode 100644 deps/jemalloc/include/msvc_compat/windows_extra.h create mode 100644 deps/jemalloc/jemalloc.pc.in create mode 100644 deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 create mode 100644 deps/jemalloc/msvc/ReadMe.txt create mode 100644 deps/jemalloc/msvc/jemalloc_vc2015.sln create mode 100644 deps/jemalloc/msvc/jemalloc_vc2017.sln create mode 100644 deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj create mode 100644 deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters create mode 100644 deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj create mode 100644 deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters create mode 100644 deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj create mode 100644 deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters create mode 100644 deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj create mode 100644 deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters create mode 100644 deps/jemalloc/msvc/test_threads/test_threads.cpp create mode 100644 deps/jemalloc/msvc/test_threads/test_threads.h create mode 100644 deps/jemalloc/msvc/test_threads/test_threads_main.cpp create mode 100755 deps/jemalloc/run_tests.sh create mode 100755 deps/jemalloc/scripts/gen_run_tests.py create mode 100755 deps/jemalloc/scripts/gen_travis.py create mode 100644 deps/jemalloc/src/arena.c create 
mode 100644 deps/jemalloc/src/background_thread.c create mode 100644 deps/jemalloc/src/base.c create mode 100644 deps/jemalloc/src/bin.c create mode 100644 deps/jemalloc/src/bitmap.c create mode 100644 deps/jemalloc/src/ckh.c create mode 100644 deps/jemalloc/src/ctl.c create mode 100644 deps/jemalloc/src/div.c create mode 100644 deps/jemalloc/src/extent.c create mode 100644 deps/jemalloc/src/extent_dss.c create mode 100644 deps/jemalloc/src/extent_mmap.c create mode 100644 deps/jemalloc/src/hash.c create mode 100644 deps/jemalloc/src/hook.c create mode 100644 deps/jemalloc/src/jemalloc.c create mode 100644 deps/jemalloc/src/jemalloc_cpp.cpp create mode 100644 deps/jemalloc/src/large.c create mode 100644 deps/jemalloc/src/log.c create mode 100644 deps/jemalloc/src/malloc_io.c create mode 100644 deps/jemalloc/src/mutex.c create mode 100644 deps/jemalloc/src/mutex_pool.c create mode 100644 deps/jemalloc/src/nstime.c create mode 100644 deps/jemalloc/src/pages.c create mode 100644 deps/jemalloc/src/prng.c create mode 100644 deps/jemalloc/src/prof.c create mode 100644 deps/jemalloc/src/rtree.c create mode 100644 deps/jemalloc/src/safety_check.c create mode 100644 deps/jemalloc/src/sc.c create mode 100644 deps/jemalloc/src/stats.c create mode 100644 deps/jemalloc/src/sz.c create mode 100644 deps/jemalloc/src/tcache.c create mode 100644 deps/jemalloc/src/test_hooks.c create mode 100644 deps/jemalloc/src/ticker.c create mode 100644 deps/jemalloc/src/tsd.c create mode 100644 deps/jemalloc/src/witness.c create mode 100644 deps/jemalloc/src/zone.c create mode 100644 deps/jemalloc/test/include/test/SFMT-alti.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params11213.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params1279.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params132049.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params19937.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params216091.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params2281.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params4253.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params44497.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params607.h create mode 100644 deps/jemalloc/test/include/test/SFMT-params86243.h create mode 100644 deps/jemalloc/test/include/test/SFMT-sse2.h create mode 100644 deps/jemalloc/test/include/test/SFMT.h create mode 100644 deps/jemalloc/test/include/test/btalloc.h create mode 100644 deps/jemalloc/test/include/test/extent_hooks.h create mode 100644 deps/jemalloc/test/include/test/jemalloc_test.h.in create mode 100644 deps/jemalloc/test/include/test/jemalloc_test_defs.h.in create mode 100644 deps/jemalloc/test/include/test/math.h create mode 100644 deps/jemalloc/test/include/test/mq.h create mode 100644 deps/jemalloc/test/include/test/mtx.h create mode 100644 deps/jemalloc/test/include/test/test.h create mode 100644 deps/jemalloc/test/include/test/thd.h create mode 100644 deps/jemalloc/test/include/test/timer.h create mode 100644 deps/jemalloc/test/integration/MALLOCX_ARENA.c create mode 100644 deps/jemalloc/test/integration/aligned_alloc.c create mode 100644 deps/jemalloc/test/integration/allocated.c create mode 100644 deps/jemalloc/test/integration/cpp/basic.cpp create mode 100644 deps/jemalloc/test/integration/extent.c create mode 100644 deps/jemalloc/test/integration/extent.sh create mode 100644 
deps/jemalloc/test/integration/malloc.c create mode 100644 deps/jemalloc/test/integration/mallocx.c create mode 100644 deps/jemalloc/test/integration/mallocx.sh create mode 100644 deps/jemalloc/test/integration/overflow.c create mode 100644 deps/jemalloc/test/integration/posix_memalign.c create mode 100644 deps/jemalloc/test/integration/rallocx.c create mode 100644 deps/jemalloc/test/integration/sdallocx.c create mode 100644 deps/jemalloc/test/integration/slab_sizes.c create mode 100644 deps/jemalloc/test/integration/slab_sizes.sh create mode 100644 deps/jemalloc/test/integration/smallocx.c create mode 100644 deps/jemalloc/test/integration/smallocx.sh create mode 100644 deps/jemalloc/test/integration/thread_arena.c create mode 100644 deps/jemalloc/test/integration/thread_tcache_enabled.c create mode 100644 deps/jemalloc/test/integration/xallocx.c create mode 100644 deps/jemalloc/test/integration/xallocx.sh create mode 100644 deps/jemalloc/test/src/SFMT.c create mode 100644 deps/jemalloc/test/src/btalloc.c create mode 100644 deps/jemalloc/test/src/btalloc_0.c create mode 100644 deps/jemalloc/test/src/btalloc_1.c create mode 100644 deps/jemalloc/test/src/math.c create mode 100644 deps/jemalloc/test/src/mq.c create mode 100644 deps/jemalloc/test/src/mtx.c create mode 100644 deps/jemalloc/test/src/test.c create mode 100644 deps/jemalloc/test/src/thd.c create mode 100644 deps/jemalloc/test/src/timer.c create mode 100644 deps/jemalloc/test/stress/hookbench.c create mode 100644 deps/jemalloc/test/stress/microbench.c create mode 100644 deps/jemalloc/test/test.sh.in create mode 100644 deps/jemalloc/test/unit/SFMT.c create mode 100644 deps/jemalloc/test/unit/a0.c create mode 100644 deps/jemalloc/test/unit/arena_reset.c create mode 100644 deps/jemalloc/test/unit/arena_reset_prof.c create mode 100644 deps/jemalloc/test/unit/arena_reset_prof.sh create mode 100644 deps/jemalloc/test/unit/atomic.c create mode 100644 deps/jemalloc/test/unit/background_thread.c create mode 100644 deps/jemalloc/test/unit/background_thread_enable.c create mode 100644 deps/jemalloc/test/unit/base.c create mode 100644 deps/jemalloc/test/unit/binshard.c create mode 100644 deps/jemalloc/test/unit/binshard.sh create mode 100644 deps/jemalloc/test/unit/bit_util.c create mode 100644 deps/jemalloc/test/unit/bitmap.c create mode 100644 deps/jemalloc/test/unit/ckh.c create mode 100644 deps/jemalloc/test/unit/decay.c create mode 100644 deps/jemalloc/test/unit/decay.sh create mode 100644 deps/jemalloc/test/unit/div.c create mode 100644 deps/jemalloc/test/unit/emitter.c create mode 100644 deps/jemalloc/test/unit/extent_quantize.c create mode 100644 deps/jemalloc/test/unit/extent_util.c create mode 100644 deps/jemalloc/test/unit/fork.c create mode 100644 deps/jemalloc/test/unit/hash.c create mode 100644 deps/jemalloc/test/unit/hook.c create mode 100644 deps/jemalloc/test/unit/huge.c create mode 100644 deps/jemalloc/test/unit/junk.c create mode 100644 deps/jemalloc/test/unit/junk.sh create mode 100644 deps/jemalloc/test/unit/junk_alloc.c create mode 100644 deps/jemalloc/test/unit/junk_alloc.sh create mode 100644 deps/jemalloc/test/unit/junk_free.c create mode 100644 deps/jemalloc/test/unit/junk_free.sh create mode 100644 deps/jemalloc/test/unit/log.c create mode 100644 deps/jemalloc/test/unit/mallctl.c create mode 100644 deps/jemalloc/test/unit/malloc_io.c create mode 100644 deps/jemalloc/test/unit/math.c create mode 100644 deps/jemalloc/test/unit/mq.c create mode 100644 deps/jemalloc/test/unit/mtx.c create mode 100644 
deps/jemalloc/test/unit/nstime.c create mode 100644 deps/jemalloc/test/unit/pack.c create mode 100644 deps/jemalloc/test/unit/pack.sh create mode 100644 deps/jemalloc/test/unit/pages.c create mode 100644 deps/jemalloc/test/unit/ph.c create mode 100644 deps/jemalloc/test/unit/prng.c create mode 100644 deps/jemalloc/test/unit/prof_accum.c create mode 100644 deps/jemalloc/test/unit/prof_accum.sh create mode 100644 deps/jemalloc/test/unit/prof_active.c create mode 100644 deps/jemalloc/test/unit/prof_active.sh create mode 100644 deps/jemalloc/test/unit/prof_gdump.c create mode 100644 deps/jemalloc/test/unit/prof_gdump.sh create mode 100644 deps/jemalloc/test/unit/prof_idump.c create mode 100644 deps/jemalloc/test/unit/prof_idump.sh create mode 100644 deps/jemalloc/test/unit/prof_log.c create mode 100644 deps/jemalloc/test/unit/prof_log.sh create mode 100644 deps/jemalloc/test/unit/prof_reset.c create mode 100644 deps/jemalloc/test/unit/prof_reset.sh create mode 100644 deps/jemalloc/test/unit/prof_tctx.c create mode 100644 deps/jemalloc/test/unit/prof_tctx.sh create mode 100644 deps/jemalloc/test/unit/prof_thread_name.c create mode 100644 deps/jemalloc/test/unit/prof_thread_name.sh create mode 100644 deps/jemalloc/test/unit/ql.c create mode 100644 deps/jemalloc/test/unit/qr.c create mode 100644 deps/jemalloc/test/unit/rb.c create mode 100644 deps/jemalloc/test/unit/retained.c create mode 100644 deps/jemalloc/test/unit/rtree.c create mode 100644 deps/jemalloc/test/unit/safety_check.c create mode 100644 deps/jemalloc/test/unit/safety_check.sh create mode 100644 deps/jemalloc/test/unit/sc.c create mode 100644 deps/jemalloc/test/unit/seq.c create mode 100644 deps/jemalloc/test/unit/size_classes.c create mode 100644 deps/jemalloc/test/unit/slab.c create mode 100644 deps/jemalloc/test/unit/smoothstep.c create mode 100644 deps/jemalloc/test/unit/spin.c create mode 100644 deps/jemalloc/test/unit/stats.c create mode 100644 deps/jemalloc/test/unit/stats_print.c create mode 100644 deps/jemalloc/test/unit/test_hooks.c create mode 100644 deps/jemalloc/test/unit/ticker.c create mode 100644 deps/jemalloc/test/unit/tsd.c create mode 100644 deps/jemalloc/test/unit/witness.c create mode 100644 deps/jemalloc/test/unit/zero.c create mode 100644 deps/jemalloc/test/unit/zero.sh create mode 100644 deps/linenoise/.gitignore create mode 100644 deps/linenoise/Makefile create mode 100644 deps/linenoise/README.markdown create mode 100644 deps/linenoise/example.c create mode 100644 deps/linenoise/linenoise.c create mode 100644 deps/linenoise/linenoise.h create mode 100644 deps/lua/COPYRIGHT create mode 100644 deps/lua/HISTORY create mode 100644 deps/lua/INSTALL create mode 100644 deps/lua/Makefile create mode 100644 deps/lua/README create mode 100644 deps/lua/doc/contents.html create mode 100644 deps/lua/doc/cover.png create mode 100644 deps/lua/doc/logo.gif create mode 100644 deps/lua/doc/lua.1 create mode 100644 deps/lua/doc/lua.css create mode 100644 deps/lua/doc/lua.html create mode 100644 deps/lua/doc/luac.1 create mode 100644 deps/lua/doc/luac.html create mode 100644 deps/lua/doc/manual.css create mode 100644 deps/lua/doc/manual.html create mode 100644 deps/lua/doc/readme.html create mode 100644 deps/lua/etc/Makefile create mode 100644 deps/lua/etc/README create mode 100644 deps/lua/etc/all.c create mode 100644 deps/lua/etc/lua.hpp create mode 100644 deps/lua/etc/lua.ico create mode 100644 deps/lua/etc/lua.pc create mode 100644 deps/lua/etc/luavs.bat create mode 100644 deps/lua/etc/min.c create mode 100644 
deps/lua/etc/noparser.c create mode 100644 deps/lua/etc/strict.lua create mode 100644 deps/lua/src/Makefile create mode 100644 deps/lua/src/fpconv.c create mode 100644 deps/lua/src/fpconv.h create mode 100644 deps/lua/src/lapi.c create mode 100644 deps/lua/src/lapi.h create mode 100644 deps/lua/src/lauxlib.c create mode 100644 deps/lua/src/lauxlib.h create mode 100644 deps/lua/src/lbaselib.c create mode 100644 deps/lua/src/lcode.c create mode 100644 deps/lua/src/lcode.h create mode 100644 deps/lua/src/ldblib.c create mode 100644 deps/lua/src/ldebug.c create mode 100644 deps/lua/src/ldebug.h create mode 100644 deps/lua/src/ldo.c create mode 100644 deps/lua/src/ldo.h create mode 100644 deps/lua/src/ldump.c create mode 100644 deps/lua/src/lfunc.c create mode 100644 deps/lua/src/lfunc.h create mode 100644 deps/lua/src/lgc.c create mode 100644 deps/lua/src/lgc.h create mode 100644 deps/lua/src/linit.c create mode 100644 deps/lua/src/liolib.c create mode 100644 deps/lua/src/llex.c create mode 100644 deps/lua/src/llex.h create mode 100644 deps/lua/src/llimits.h create mode 100644 deps/lua/src/lmathlib.c create mode 100644 deps/lua/src/lmem.c create mode 100644 deps/lua/src/lmem.h create mode 100644 deps/lua/src/loadlib.c create mode 100644 deps/lua/src/lobject.c create mode 100644 deps/lua/src/lobject.h create mode 100644 deps/lua/src/lopcodes.c create mode 100644 deps/lua/src/lopcodes.h create mode 100644 deps/lua/src/loslib.c create mode 100644 deps/lua/src/lparser.c create mode 100644 deps/lua/src/lparser.h create mode 100644 deps/lua/src/lstate.c create mode 100644 deps/lua/src/lstate.h create mode 100644 deps/lua/src/lstring.c create mode 100644 deps/lua/src/lstring.h create mode 100644 deps/lua/src/lstrlib.c create mode 100644 deps/lua/src/ltable.c create mode 100644 deps/lua/src/ltable.h create mode 100644 deps/lua/src/ltablib.c create mode 100644 deps/lua/src/ltm.c create mode 100644 deps/lua/src/ltm.h create mode 100644 deps/lua/src/lua.c create mode 100644 deps/lua/src/lua.h create mode 100644 deps/lua/src/lua_bit.c create mode 100644 deps/lua/src/lua_cjson.c create mode 100644 deps/lua/src/lua_cmsgpack.c create mode 100644 deps/lua/src/lua_struct.c create mode 100644 deps/lua/src/luac.c create mode 100644 deps/lua/src/luaconf.h create mode 100644 deps/lua/src/lualib.h create mode 100644 deps/lua/src/lundump.c create mode 100644 deps/lua/src/lundump.h create mode 100644 deps/lua/src/lvm.c create mode 100644 deps/lua/src/lvm.h create mode 100644 deps/lua/src/lzio.c create mode 100644 deps/lua/src/lzio.h create mode 100644 deps/lua/src/print.c create mode 100644 deps/lua/src/strbuf.c create mode 100644 deps/lua/src/strbuf.h create mode 100644 deps/lua/test/README create mode 100644 deps/lua/test/bisect.lua create mode 100644 deps/lua/test/cf.lua create mode 100644 deps/lua/test/echo.lua create mode 100644 deps/lua/test/env.lua create mode 100644 deps/lua/test/factorial.lua create mode 100644 deps/lua/test/fib.lua create mode 100644 deps/lua/test/fibfor.lua create mode 100644 deps/lua/test/globals.lua create mode 100644 deps/lua/test/hello.lua create mode 100644 deps/lua/test/life.lua create mode 100644 deps/lua/test/luac.lua create mode 100644 deps/lua/test/printf.lua create mode 100644 deps/lua/test/readonly.lua create mode 100644 deps/lua/test/sieve.lua create mode 100644 deps/lua/test/sort.lua create mode 100644 deps/lua/test/table.lua create mode 100644 deps/lua/test/trace-calls.lua create mode 100644 deps/lua/test/trace-globals.lua create mode 100644 deps/lua/test/xd.lua (limited to 
'deps')

diff --git a/deps/Makefile b/deps/Makefile
new file mode 100644
index 0000000..83ac557
--- /dev/null
+++ b/deps/Makefile
@@ -0,0 +1,111 @@
+# Redis dependency Makefile
+
+uname_S:= $(shell sh -c 'uname -s 2>/dev/null || echo not')
+
+LUA_DEBUG?=no
+LUA_COVERAGE?=no
+
+CCCOLOR="\033[34m"
+LINKCOLOR="\033[34;1m"
+SRCCOLOR="\033[33m"
+BINCOLOR="\033[37;1m"
+MAKECOLOR="\033[32;1m"
+ENDCOLOR="\033[0m"
+
+default:
+	@echo "Explicit target required"
+
+.PHONY: default
+
+# Prerequisites target
+.make-prerequisites:
+	@touch $@
+
+# Clean everything when CFLAGS is different
+ifneq ($(shell sh -c '[ -f .make-cflags ] && cat .make-cflags || echo none'), $(CFLAGS))
+.make-cflags: distclean
+	-(echo "$(CFLAGS)" > .make-cflags)
+.make-prerequisites: .make-cflags
+endif
+
+# Clean everything when LDFLAGS is different
+ifneq ($(shell sh -c '[ -f .make-ldflags ] && cat .make-ldflags || echo none'), $(LDFLAGS))
+.make-ldflags: distclean
+	-(echo "$(LDFLAGS)" > .make-ldflags)
+.make-prerequisites: .make-ldflags
+endif
+
+distclean:
+	-(cd hiredis && $(MAKE) clean) > /dev/null || true
+	-(cd linenoise && $(MAKE) clean) > /dev/null || true
+	-(cd lua && $(MAKE) clean) > /dev/null || true
+	-(cd jemalloc && [ -f Makefile ] && $(MAKE) distclean) > /dev/null || true
+	-(cd hdr_histogram && $(MAKE) clean) > /dev/null || true
+	-(rm -f .make-*)
+
+.PHONY: distclean
+
+ifeq ($(BUILD_TLS),yes)
+    HIREDIS_MAKE_FLAGS = USE_SSL=1
+endif
+
+hiredis: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd hiredis && $(MAKE) static $(HIREDIS_MAKE_FLAGS)
+
+.PHONY: hiredis
+
+linenoise: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd linenoise && $(MAKE)
+
+.PHONY: linenoise
+
+hdr_histogram: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd hdr_histogram && $(MAKE)
+
+.PHONY: hdr_histogram
+
+ifeq ($(uname_S),SunOS)
+    # Make isinf() available
+    LUA_CFLAGS= -D__C99FEATURES__=1
+endif
+
+LUA_CFLAGS+= -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP $(CFLAGS)
+LUA_LDFLAGS+= $(LDFLAGS)
+ifeq ($(LUA_DEBUG),yes)
+    LUA_CFLAGS+= -O0 -g -DLUA_USE_APICHECK
+else
+    LUA_CFLAGS+= -O2
+endif
+ifeq ($(LUA_COVERAGE),yes)
+    LUA_CFLAGS += -fprofile-arcs -ftest-coverage
+    LUA_LDFLAGS += -fprofile-arcs -ftest-coverage
+endif
+
+# lua's Makefile defines AR="ar rcu", which is unusual, and makes it more
+# challenging to cross-compile lua (and redis). These defines make it easier
+# to fit redis into cross-compilation environments, which typically set AR.
+AR=ar
+ARFLAGS=rc
+
+lua: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd lua/src && $(MAKE) all CFLAGS="$(LUA_CFLAGS)" MYLDFLAGS="$(LUA_LDFLAGS)" AR="$(AR) $(ARFLAGS)"
+
+.PHONY: lua
+
+JEMALLOC_CFLAGS= -std=gnu99 -Wall -pipe -g3 -O3 -funroll-loops $(CFLAGS)
+JEMALLOC_LDFLAGS= $(LDFLAGS)
+
+ifneq ($(DEB_HOST_GNU_TYPE),)
+JEMALLOC_CONFIGURE_OPTS += --host=$(DEB_HOST_GNU_TYPE)
+endif
+
+jemalloc: .make-prerequisites
+	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
+	cd jemalloc && ./configure --with-version=5.2.1-0-g0 --with-lg-quantum=3 --with-jemalloc-prefix=je_ CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" $(JEMALLOC_CONFIGURE_OPTS)
+	cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a
+
+.PHONY: jemalloc
diff --git a/deps/README.md b/deps/README.md
new file mode 100644
index 0000000..bd1280e
--- /dev/null
+++ b/deps/README.md
@@ -0,0 +1,107 @@
+This directory contains all Redis dependencies, except for the libc that
+should be provided by the operating system.
+
+* **Jemalloc** is our memory allocator, used by default on Linux as a replacement for libc malloc. It has good performance and excellent fragmentation behavior. This component is upgraded from time to time.
+* **hiredis** is the official C client library for Redis. It is used by redis-cli, redis-benchmark and Redis Sentinel. It is part of the official Redis ecosystem but is developed externally from the Redis repository, so we just upgrade it as needed.
+* **linenoise** is a readline replacement. It is developed by the same authors as Redis but is managed as a separate project and updated as needed.
+* **lua** is Lua 5.1 with minor changes for security and additional libraries.
+* **hdr_histogram** is used for per-command latency tracking histograms.
+
+How to upgrade the above dependencies
+===
+
+Jemalloc
+---
+
+Jemalloc is modified with changes that allow us to implement the Redis
+active defragmentation logic. However, this feature of Redis is optional,
+and Redis is able to detect whether the Jemalloc version it is compiled
+against supports such Redis-specific modifications. So, in theory, if you
+are not interested in active defragmentation, you can replace Jemalloc
+by just following these steps:
+
+1. Remove the jemalloc directory.
+2. Replace it with the new jemalloc source tree.
+3. Edit the Makefile located in the same directory as the README you are
+   reading, and change the `--with-version` option in the Jemalloc configure
+   script invocation to the version you are using. This is required because
+   otherwise the Jemalloc configure script is broken and will not work when
+   nested in another git repository.
+
+However, note that we change Jemalloc settings via its `configure` script using the `--with-lg-quantum` option, setting it to 3 instead of 4. This provides us with more size classes that better suit the Redis data structures, in order to gain memory efficiency.
+
+If you want to upgrade Jemalloc while also providing support for
+active defragmentation, in addition to the above steps you need to perform
+the following additional steps:
+
+4. In the Jemalloc tree, in the file `include/jemalloc/jemalloc_macros.h.in`,
+   make sure to add `#define JEMALLOC_FRAG_HINT`.
+5. Implement the function `je_get_defrag_hint()` inside `src/jemalloc.c`. You
+   can see how it is implemented in the current Jemalloc source tree shipped
+   with Redis, and rewrite it according to the new Jemalloc internals if they
+   changed; otherwise, if you are upgrading to a similar version of Jemalloc,
+   you can simply copy the old implementation. (A purely illustrative sketch
+   of such a hook appears further below, after the hdr_histogram COPYING.txt
+   text.)
+
+#### Updating/upgrading jemalloc
+
+The jemalloc directory is pulled as a subtree from the upstream jemalloc GitHub repo. To update it you should run, from the project root:
+
+1. `git subtree pull --prefix deps/jemalloc https://github.com/jemalloc/jemalloc.git --squash`
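+`git subtree pull` also accepts an explicit ref, which can be used to pin the
+vendored tree to a specific upstream release instead of a moving branch. For
+example (the tag name here is purely illustrative):
+`git subtree pull --prefix deps/jemalloc https://github.com/jemalloc/jemalloc.git 5.2.1 --squash`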
+The subtree pull should hopefully merge the local changes into the new version.
+2. In case any conflicts arise (due to our changes), you'll need to resolve them and commit.
+3. Reconfigure jemalloc:
+```sh
+rm deps/jemalloc/VERSION deps/jemalloc/configure
+cd deps/jemalloc
+./autogen.sh --with-version=-0-g0
+```
+4. Update jemalloc's version in `deps/Makefile`: search for "`--with-version=-0-g0`" and update it accordingly.
+5. Commit the changes (VERSION, configure, Makefile).
+
+Hiredis
+---
+
+Hiredis uses the SDS string library, which must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis has often used forked versions of hiredis in one way or another. In order to upgrade, it is advised to take a lot of care:
+
+1. Check with diff whether the hiredis API changed and what impact it could have on Redis.
+2. Make sure that the SDS library inside Hiredis and inside Redis are compatible.
+3. After the upgrade, run the Redis Sentinel test.
+4. Check manually that redis-cli and redis-benchmark behave as expected, since we currently have no tests for the CLI utilities.
+
+Linenoise
+---
+
+Linenoise is upgraded rarely, only as needed. The upgrade process is trivial since
+Redis uses an unmodified version of linenoise, so to upgrade just do the
+following:
+
+1. Remove the linenoise directory.
+2. Replace it with the new linenoise source tree.
+
+Lua
+---
+
+We use Lua 5.1 and no upgrade is currently planned, since we don't want to break
+Lua scripts just to gain new Lua features: in the context of Redis Lua scripts the
+capabilities of 5.1 are usually more than enough, the release is rock solid,
+and we definitely don't want to break old scripts.
+
+So upgrading Lua is up to the Redis project maintainers and should be a
+manual procedure performed by taking a diff between the different versions.
+
+Currently we have at least the following differences between official Lua 5.1
+and our version:
+
+1. Makefile is modified to allow a different compiler than GCC.
+2. We have the implementation source code, and directly link the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
+3. There is a security fix in `ldo.c`, line 498: the check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
+
+Hdr_Histogram
+---
+
+Updated source can be found here: https://github.com/HdrHistogram/HdrHistogram_c
+We use a customized version based on master branch commit e4448cf6d1cd08fff519812d3b1e58bd5a94ac42.
+1. Compare all changes under the /hdr_histogram directory to upstream master commit e4448cf6d1cd08fff519812d3b1e58bd5a94ac42.
+2. Copy updated files from the newer version onto the files in /hdr_histogram.
+3. Apply the changes from step 1 above to the updated files.
+
diff --git a/deps/hdr_histogram/COPYING.txt b/deps/hdr_histogram/COPYING.txt
new file mode 100644
index 0000000..0e259d4
--- /dev/null
+++ b/deps/hdr_histogram/COPYING.txt
@@ -0,0 +1,121 @@
+Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER.
+ +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. 
diff --git a/deps/hdr_histogram/LICENSE.txt b/deps/hdr_histogram/LICENSE.txt new file mode 100644 index 0000000..9b4e66e --- /dev/null +++ b/deps/hdr_histogram/LICENSE.txt @@ -0,0 +1,41 @@ +The code in this repository code was Written by Gil Tene, Michael Barker, +and Matt Warren, and released to the public domain, as explained at +http://creativecommons.org/publicdomain/zero/1.0/ + +For users of this code who wish to consume it under the "BSD" license +rather than under the public domain or CC0 contribution text mentioned +above, the code found under this directory is *also* provided under the +following license (commonly referred to as the BSD 2-Clause License). This +license does not detract from the above stated release of the code into +the public domain, and simply represents an additional license granted by +the Author. + +----------------------------------------------------------------------------- +** Beginning of "BSD 2-Clause License" text. ** + + Copyright (c) 2012, 2013, 2014 Gil Tene + Copyright (c) 2014 Michael Barker + Copyright (c) 2014 Matt Warren + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. diff --git a/deps/hdr_histogram/Makefile b/deps/hdr_histogram/Makefile new file mode 100644 index 0000000..28dd93e --- /dev/null +++ b/deps/hdr_histogram/Makefile @@ -0,0 +1,27 @@ +STD= -std=c99 +WARN= -Wall +OPT= -Os + +R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) -DHDR_MALLOC_INCLUDE=\"hdr_redis_malloc.h\" +R_LDFLAGS= $(LDFLAGS) +DEBUG= -g + +R_CC=$(CC) $(R_CFLAGS) +R_LD=$(CC) $(R_LDFLAGS) + +AR= ar +ARFLAGS= rcs + +libhdrhistogram.a: hdr_histogram.o + $(AR) $(ARFLAGS) $@ $+ + +hdr_histogram.o: hdr_histogram.h hdr_histogram.c + +.c.o: + $(R_CC) -c $< + +clean: + rm -f *.o + rm -f *.a + + diff --git a/deps/hdr_histogram/README.md b/deps/hdr_histogram/README.md new file mode 100644 index 0000000..93a156e --- /dev/null +++ b/deps/hdr_histogram/README.md @@ -0,0 +1,78 @@ +HdrHistogram_c: 'C' port of High Dynamic Range (HDR) Histogram + +HdrHistogram +---------------------------------------------- + +[![Gitter chat](https://badges.gitter.im/HdrHistogram/HdrHistogram.png)](https://gitter.im/HdrHistogram/HdrHistogram) + +This port contains a subset of the functionality supported by the Java +implementation. 
The current supported features are: + +* Standard histogram with 64 bit counts (32/16 bit counts not supported) +* All iterator types (all values, recorded, percentiles, linear, logarithmic) +* Histogram serialisation (encoding version 1.2, decoding 1.0-1.2) +* Reader/writer phaser and interval recorder + +Features not supported, but planned + +* Auto-resizing of histograms + +Features unlikely to be implemented + +* Double histograms +* Atomic/Concurrent histograms +* 16/32 bit histograms + +# Simple Tutorial + +## Recording values + +```C +#include + +struct hdr_histogram* histogram; + +// Initialise the histogram +hdr_init( + 1, // Minimum value + INT64_C(3600000000), // Maximum value + 3, // Number of significant figures + &histogram) // Pointer to initialise + +// Record value +hdr_record_value( + histogram, // Histogram to record to + value) // Value to record + +// Record value n times +hdr_record_values( + histogram, // Histogram to record to + value, // Value to record + 10) // Record value 10 times + +// Record value with correction for co-ordinated omission. +hdr_record_corrected_value( + histogram, // Histogram to record to + value, // Value to record + 1000) // Record with expected interval of 1000. + +// Print out the values of the histogram +hdr_percentiles_print( + histogram, + stdout, // File to write to + 5, // Granularity of printed values + 1.0, // Multiplier for results + CLASSIC); // Format CLASSIC/CSV supported. +``` + +## More examples + +For more detailed examples of recording and logging results look at the +[hdr_decoder](examples/hdr_decoder.c) +and [hiccup](examples/hiccup.c) +examples. You can run hiccup and decoder +and pipe the results of one into the other. + +``` +$ ./examples/hiccup | ./examples/hdr_decoder +``` diff --git a/deps/hdr_histogram/hdr_atomic.h b/deps/hdr_histogram/hdr_atomic.h new file mode 100644 index 0000000..ae1056a --- /dev/null +++ b/deps/hdr_histogram/hdr_atomic.h @@ -0,0 +1,146 @@ +/** + * hdr_atomic.h + * Written by Philip Orwig and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + */ + +#ifndef HDR_ATOMIC_H__ +#define HDR_ATOMIC_H__ + + +#if defined(_MSC_VER) + +#include +#include +#include + +static void __inline * hdr_atomic_load_pointer(void** pointer) +{ + _ReadBarrier(); + return *pointer; +} + +static void hdr_atomic_store_pointer(void** pointer, void* value) +{ + _WriteBarrier(); + *pointer = value; +} + +static int64_t __inline hdr_atomic_load_64(int64_t* field) +{ + _ReadBarrier(); + return *field; +} + +static void __inline hdr_atomic_store_64(int64_t* field, int64_t value) +{ + _WriteBarrier(); + *field = value; +} + +static int64_t __inline hdr_atomic_exchange_64(volatile int64_t* field, int64_t value) +{ +#if defined(_WIN64) + return _InterlockedExchange64(field, value); +#else + int64_t comparand; + int64_t initial_value = *field; + do + { + comparand = initial_value; + initial_value = _InterlockedCompareExchange64(field, value, comparand); + } + while (comparand != initial_value); + + return initial_value; +#endif +} + +static int64_t __inline hdr_atomic_add_fetch_64(volatile int64_t* field, int64_t value) +{ +#if defined(_WIN64) + return _InterlockedExchangeAdd64(field, value) + value; +#else + int64_t comparand; + int64_t initial_value = *field; + do + { + comparand = initial_value; + initial_value = _InterlockedCompareExchange64(field, comparand + value, comparand); + } + while (comparand != initial_value); + + return initial_value + value; +#endif +} + +static 
bool __inline hdr_atomic_compare_exchange_64(volatile int64_t* field, int64_t* expected, int64_t desired) +{ + return *expected == _InterlockedCompareExchange64(field, desired, *expected); +} + +#elif defined(__ATOMIC_SEQ_CST) + +#define hdr_atomic_load_pointer(x) __atomic_load_n(x, __ATOMIC_SEQ_CST) +#define hdr_atomic_store_pointer(f,v) __atomic_store_n(f,v, __ATOMIC_SEQ_CST) +#define hdr_atomic_load_64(x) __atomic_load_n(x, __ATOMIC_SEQ_CST) +#define hdr_atomic_store_64(f,v) __atomic_store_n(f,v, __ATOMIC_SEQ_CST) +#define hdr_atomic_exchange_64(f,i) __atomic_exchange_n(f,i, __ATOMIC_SEQ_CST) +#define hdr_atomic_add_fetch_64(field, value) __atomic_add_fetch(field, value, __ATOMIC_SEQ_CST) +#define hdr_atomic_compare_exchange_64(field, expected, desired) __atomic_compare_exchange_n(field, expected, desired, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) + +#elif defined(__x86_64__) + +#include +#include + +static inline void* hdr_atomic_load_pointer(void** pointer) +{ + void* p = *pointer; + asm volatile ("" ::: "memory"); + return p; +} + +static inline void hdr_atomic_store_pointer(void** pointer, void* value) +{ + asm volatile ("lock; xchgq %0, %1" : "+q" (value), "+m" (*pointer)); +} + +static inline int64_t hdr_atomic_load_64(int64_t* field) +{ + int64_t i = *field; + asm volatile ("" ::: "memory"); + return i; +} + +static inline void hdr_atomic_store_64(int64_t* field, int64_t value) +{ + asm volatile ("lock; xchgq %0, %1" : "+q" (value), "+m" (*field)); +} + +static inline int64_t hdr_atomic_exchange_64(volatile int64_t* field, int64_t value) +{ + int64_t result = 0; + asm volatile ("lock; xchgq %1, %2" : "=r" (result), "+q" (value), "+m" (*field)); + return result; +} + +static inline int64_t hdr_atomic_add_fetch_64(volatile int64_t* field, int64_t value) +{ + return __sync_add_and_fetch(field, value); +} + +static inline bool hdr_atomic_compare_exchange_64(volatile int64_t* field, int64_t* expected, int64_t desired) +{ + int64_t original; + asm volatile( "lock; cmpxchgq %2, %1" : "=a"(original), "+m"(*field) : "q"(desired), "0"(*expected)); + return original == *expected; +} + +#else + +#error "Unable to determine atomic operations for your platform" + +#endif + +#endif /* HDR_ATOMIC_H__ */ diff --git a/deps/hdr_histogram/hdr_histogram.c b/deps/hdr_histogram/hdr_histogram.c new file mode 100644 index 0000000..f469347 --- /dev/null +++ b/deps/hdr_histogram/hdr_histogram.c @@ -0,0 +1,1229 @@ +/** + * hdr_histogram.c + * Written by Michael Barker and released to the public domain, + * as explained at http://creativecommons.org/publicdomain/zero/1.0/ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hdr_histogram.h" +#include "hdr_tests.h" +#include "hdr_atomic.h" + +#ifndef HDR_MALLOC_INCLUDE +#define HDR_MALLOC_INCLUDE "hdr_malloc.h" +#endif + +#include HDR_MALLOC_INCLUDE + +/* ###### ####### ## ## ## ## ######## ###### */ +/* ## ## ## ## ## ## ### ## ## ## ## */ +/* ## ## ## ## ## #### ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ###### */ +/* ## ## ## ## ## ## #### ## ## */ +/* ## ## ## ## ## ## ## ### ## ## ## */ +/* ###### ####### ####### ## ## ## ###### */ + +static int32_t normalize_index(const struct hdr_histogram* h, int32_t index) +{ + int32_t normalized_index; + int32_t adjustment = 0; + if (h->normalizing_index_offset == 0) + { + return index; + } + + normalized_index = index - h->normalizing_index_offset; + + if (normalized_index < 0) + { + adjustment = h->counts_len; + } + else if (normalized_index >= h->counts_len) + { + 
adjustment = -h->counts_len; + } + + return normalized_index + adjustment; +} + +static int64_t counts_get_direct(const struct hdr_histogram* h, int32_t index) +{ + return h->counts[index]; +} + +static int64_t counts_get_normalised(const struct hdr_histogram* h, int32_t index) +{ + return counts_get_direct(h, normalize_index(h, index)); +} + +static void counts_inc_normalised( + struct hdr_histogram* h, int32_t index, int64_t value) +{ + int32_t normalised_index = normalize_index(h, index); + h->counts[normalised_index] += value; + h->total_count += value; +} + +static void counts_inc_normalised_atomic( + struct hdr_histogram* h, int32_t index, int64_t value) +{ + int32_t normalised_index = normalize_index(h, index); + + hdr_atomic_add_fetch_64(&h->counts[normalised_index], value); + hdr_atomic_add_fetch_64(&h->total_count, value); +} + +static void update_min_max(struct hdr_histogram* h, int64_t value) +{ + h->min_value = (value < h->min_value && value != 0) ? value : h->min_value; + h->max_value = (value > h->max_value) ? value : h->max_value; +} + +static void update_min_max_atomic(struct hdr_histogram* h, int64_t value) +{ + int64_t current_min_value; + int64_t current_max_value; + do + { + current_min_value = hdr_atomic_load_64(&h->min_value); + + if (0 == value || current_min_value <= value) + { + break; + } + } + while (!hdr_atomic_compare_exchange_64(&h->min_value, ¤t_min_value, value)); + + do + { + current_max_value = hdr_atomic_load_64(&h->max_value); + + if (value <= current_max_value) + { + break; + } + } + while (!hdr_atomic_compare_exchange_64(&h->max_value, ¤t_max_value, value)); +} + + +/* ## ## ######## #### ## #### ######## ## ## */ +/* ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## #### */ +/* ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## */ +/* ####### ## #### ######## #### ## ## */ + +static int64_t power(int64_t base, int64_t exp) +{ + int64_t result = 1; + while(exp) + { + result *= base; exp--; + } + return result; +} + +#if defined(_MSC_VER) +# if defined(_WIN64) +# pragma intrinsic(_BitScanReverse64) +# else +# pragma intrinsic(_BitScanReverse) +# endif +#endif + +static int32_t count_leading_zeros_64(int64_t value) +{ +#if defined(_MSC_VER) + uint32_t leading_zero = 0; +#if defined(_WIN64) + _BitScanReverse64(&leading_zero, value); +#else + uint32_t high = value >> 32; + if (_BitScanReverse(&leading_zero, high)) + { + leading_zero += 32; + } + else + { + uint32_t low = value & 0x00000000FFFFFFFF; + _BitScanReverse(&leading_zero, low); + } +#endif + return 63 - leading_zero; /* smallest power of 2 containing value */ +#else + return __builtin_clzll(value); /* smallest power of 2 containing value */ +#endif +} + +static int32_t get_bucket_index(const struct hdr_histogram* h, int64_t value) +{ + int32_t pow2ceiling = 64 - count_leading_zeros_64(value | h->sub_bucket_mask); /* smallest power of 2 containing value */ + return pow2ceiling - h->unit_magnitude - (h->sub_bucket_half_count_magnitude + 1); +} + +static int32_t get_sub_bucket_index(int64_t value, int32_t bucket_index, int32_t unit_magnitude) +{ + return (int32_t)(value >> (bucket_index + unit_magnitude)); +} + +static int32_t counts_index(const struct hdr_histogram* h, int32_t bucket_index, int32_t sub_bucket_index) +{ + /* Calculate the index for the first entry in the bucket: */ + /* (The following is the equivalent of ((bucket_index + 1) * subBucketHalfCount) ): */ + int32_t bucket_base_index = (bucket_index + 1) << h->sub_bucket_half_count_magnitude; + /* 
Calculate the offset in the bucket: */ + int32_t offset_in_bucket = sub_bucket_index - h->sub_bucket_half_count; + /* The following is the equivalent of ((sub_bucket_index - subBucketHalfCount) + bucketBaseIndex; */ + return bucket_base_index + offset_in_bucket; +} + +static int64_t value_from_index(int32_t bucket_index, int32_t sub_bucket_index, int32_t unit_magnitude) +{ + return ((int64_t) sub_bucket_index) << (bucket_index + unit_magnitude); +} + +int32_t counts_index_for(const struct hdr_histogram* h, int64_t value) +{ + int32_t bucket_index = get_bucket_index(h, value); + int32_t sub_bucket_index = get_sub_bucket_index(value, bucket_index, h->unit_magnitude); + + return counts_index(h, bucket_index, sub_bucket_index); +} + +int64_t hdr_value_at_index(const struct hdr_histogram *h, int32_t index) +{ + int32_t bucket_index = (index >> h->sub_bucket_half_count_magnitude) - 1; + int32_t sub_bucket_index = (index & (h->sub_bucket_half_count - 1)) + h->sub_bucket_half_count; + + if (bucket_index < 0) + { + sub_bucket_index -= h->sub_bucket_half_count; + bucket_index = 0; + } + + return value_from_index(bucket_index, sub_bucket_index, h->unit_magnitude); +} + +int64_t hdr_size_of_equivalent_value_range(const struct hdr_histogram* h, int64_t value) +{ + int32_t bucket_index = get_bucket_index(h, value); + int32_t sub_bucket_index = get_sub_bucket_index(value, bucket_index, h->unit_magnitude); + int32_t adjusted_bucket = (sub_bucket_index >= h->sub_bucket_count) ? (bucket_index + 1) : bucket_index; + return INT64_C(1) << (h->unit_magnitude + adjusted_bucket); +} + +static int64_t size_of_equivalent_value_range_given_bucket_indices( + const struct hdr_histogram *h, + int32_t bucket_index, + int32_t sub_bucket_index) +{ + const int32_t adjusted_bucket = (sub_bucket_index >= h->sub_bucket_count) ? 
(bucket_index + 1) : bucket_index; + return INT64_C(1) << (h->unit_magnitude + adjusted_bucket); +} + +static int64_t lowest_equivalent_value(const struct hdr_histogram* h, int64_t value) +{ + int32_t bucket_index = get_bucket_index(h, value); + int32_t sub_bucket_index = get_sub_bucket_index(value, bucket_index, h->unit_magnitude); + return value_from_index(bucket_index, sub_bucket_index, h->unit_magnitude); +} + +static int64_t lowest_equivalent_value_given_bucket_indices( + const struct hdr_histogram *h, + int32_t bucket_index, + int32_t sub_bucket_index) +{ + return value_from_index(bucket_index, sub_bucket_index, h->unit_magnitude); +} + +int64_t hdr_next_non_equivalent_value(const struct hdr_histogram *h, int64_t value) +{ + return lowest_equivalent_value(h, value) + hdr_size_of_equivalent_value_range(h, value); +} + +static int64_t highest_equivalent_value(const struct hdr_histogram* h, int64_t value) +{ + return hdr_next_non_equivalent_value(h, value) - 1; +} + +int64_t hdr_median_equivalent_value(const struct hdr_histogram *h, int64_t value) +{ + return lowest_equivalent_value(h, value) + (hdr_size_of_equivalent_value_range(h, value) >> 1); +} + +static int64_t non_zero_min(const struct hdr_histogram* h) +{ + if (INT64_MAX == h->min_value) + { + return INT64_MAX; + } + + return lowest_equivalent_value(h, h->min_value); +} + +void hdr_reset_internal_counters(struct hdr_histogram* h) +{ + int min_non_zero_index = -1; + int max_index = -1; + int64_t observed_total_count = 0; + int i; + + for (i = 0; i < h->counts_len; i++) + { + int64_t count_at_index; + + if ((count_at_index = counts_get_direct(h, i)) > 0) + { + observed_total_count += count_at_index; + max_index = i; + if (min_non_zero_index == -1 && i != 0) + { + min_non_zero_index = i; + } + } + } + + if (max_index == -1) + { + h->max_value = 0; + } + else + { + int64_t max_value = hdr_value_at_index(h, max_index); + h->max_value = highest_equivalent_value(h, max_value); + } + + if (min_non_zero_index == -1) + { + h->min_value = INT64_MAX; + } + else + { + h->min_value = hdr_value_at_index(h, min_non_zero_index); + } + + h->total_count = observed_total_count; +} + +static int32_t buckets_needed_to_cover_value(int64_t value, int32_t sub_bucket_count, int32_t unit_magnitude) +{ + int64_t smallest_untrackable_value = ((int64_t) sub_bucket_count) << unit_magnitude; + int32_t buckets_needed = 1; + while (smallest_untrackable_value <= value) + { + if (smallest_untrackable_value > INT64_MAX / 2) + { + return buckets_needed + 1; + } + smallest_untrackable_value <<= 1; + buckets_needed++; + } + + return buckets_needed; +} + +/* ## ## ######## ## ## ####### ######## ## ## */ +/* ### ### ## ### ### ## ## ## ## ## ## */ +/* #### #### ## #### #### ## ## ## ## #### */ +/* ## ### ## ###### ## ### ## ## ## ######## ## */ +/* ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ######## ## ## ####### ## ## ## */ + +int hdr_calculate_bucket_config( + int64_t lowest_discernible_value, + int64_t highest_trackable_value, + int significant_figures, + struct hdr_histogram_bucket_config* cfg) +{ + int32_t sub_bucket_count_magnitude; + int64_t largest_value_with_single_unit_resolution; + + if (lowest_discernible_value < 1 || + significant_figures < 1 || 5 < significant_figures || + lowest_discernible_value * 2 > highest_trackable_value) + { + return EINVAL; + } + + cfg->lowest_discernible_value = lowest_discernible_value; + cfg->significant_figures = significant_figures; + cfg->highest_trackable_value = highest_trackable_value; 
+ + largest_value_with_single_unit_resolution = 2 * power(10, significant_figures); + sub_bucket_count_magnitude = (int32_t) ceil(log((double)largest_value_with_single_unit_resolution) / log(2)); + cfg->sub_bucket_half_count_magnitude = ((sub_bucket_count_magnitude > 1) ? sub_bucket_count_magnitude : 1) - 1; + + double unit_magnitude = log((double)lowest_discernible_value) / log(2); + if (INT32_MAX < unit_magnitude) + { + return EINVAL; + } + + cfg->unit_magnitude = (int32_t) unit_magnitude; + cfg->sub_bucket_count = (int32_t) pow(2, (cfg->sub_bucket_half_count_magnitude + 1)); + cfg->sub_bucket_half_count = cfg->sub_bucket_count / 2; + cfg->sub_bucket_mask = ((int64_t) cfg->sub_bucket_count - 1) << cfg->unit_magnitude; + + if (cfg->unit_magnitude + cfg->sub_bucket_half_count_magnitude > 61) + { + return EINVAL; + } + + cfg->bucket_count = buckets_needed_to_cover_value(highest_trackable_value, cfg->sub_bucket_count, (int32_t)cfg->unit_magnitude); + cfg->counts_len = (cfg->bucket_count + 1) * (cfg->sub_bucket_count / 2); + + return 0; +} + +void hdr_init_preallocated(struct hdr_histogram* h, struct hdr_histogram_bucket_config* cfg) +{ + h->lowest_discernible_value = cfg->lowest_discernible_value; + h->highest_trackable_value = cfg->highest_trackable_value; + h->unit_magnitude = (int32_t)cfg->unit_magnitude; + h->significant_figures = (int32_t)cfg->significant_figures; + h->sub_bucket_half_count_magnitude = cfg->sub_bucket_half_count_magnitude; + h->sub_bucket_half_count = cfg->sub_bucket_half_count; + h->sub_bucket_mask = cfg->sub_bucket_mask; + h->sub_bucket_count = cfg->sub_bucket_count; + h->min_value = INT64_MAX; + h->max_value = 0; + h->normalizing_index_offset = 0; + h->conversion_ratio = 1.0; + h->bucket_count = cfg->bucket_count; + h->counts_len = cfg->counts_len; + h->total_count = 0; +} + +int hdr_init( + int64_t lowest_discernible_value, + int64_t highest_trackable_value, + int significant_figures, + struct hdr_histogram** result) +{ + int64_t* counts; + struct hdr_histogram_bucket_config cfg; + struct hdr_histogram* histogram; + + int r = hdr_calculate_bucket_config(lowest_discernible_value, highest_trackable_value, significant_figures, &cfg); + if (r) + { + return r; + } + + counts = (int64_t*) hdr_calloc((size_t) cfg.counts_len, sizeof(int64_t)); + if (!counts) + { + return ENOMEM; + } + + histogram = (struct hdr_histogram*) hdr_calloc(1, sizeof(struct hdr_histogram)); + if (!histogram) + { + hdr_free(counts); + return ENOMEM; + } + + histogram->counts = counts; + + hdr_init_preallocated(histogram, &cfg); + *result = histogram; + + return 0; +} + +void hdr_close(struct hdr_histogram* h) +{ + if (h) { + hdr_free(h->counts); + hdr_free(h); + } +} + +int hdr_alloc(int64_t highest_trackable_value, int significant_figures, struct hdr_histogram** result) +{ + return hdr_init(1, highest_trackable_value, significant_figures, result); +} + +/* reset a histogram to zero. 
*/ +void hdr_reset(struct hdr_histogram *h) +{ + h->total_count=0; + h->min_value = INT64_MAX; + h->max_value = 0; + memset(h->counts, 0, (sizeof(int64_t) * h->counts_len)); +} + +size_t hdr_get_memory_size(struct hdr_histogram *h) +{ + return sizeof(struct hdr_histogram) + h->counts_len * sizeof(int64_t); +} + +/* ## ## ######## ######## ### ######## ######## ###### */ +/* ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ######## ## ## ## ## ## ###### ###### */ +/* ## ## ## ## ## ######### ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## */ +/* ####### ## ######## ## ## ## ######## ###### */ + + +bool hdr_record_value(struct hdr_histogram* h, int64_t value) +{ + return hdr_record_values(h, value, 1); +} + +bool hdr_record_value_atomic(struct hdr_histogram* h, int64_t value) +{ + return hdr_record_values_atomic(h, value, 1); +} + +bool hdr_record_values(struct hdr_histogram* h, int64_t value, int64_t count) +{ + int32_t counts_index; + + if (value < 0) + { + return false; + } + + counts_index = counts_index_for(h, value); + + if (counts_index < 0 || h->counts_len <= counts_index) + { + return false; + } + + counts_inc_normalised(h, counts_index, count); + update_min_max(h, value); + + return true; +} + +bool hdr_record_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count) +{ + int32_t counts_index; + + if (value < 0) + { + return false; + } + + counts_index = counts_index_for(h, value); + + if (counts_index < 0 || h->counts_len <= counts_index) + { + return false; + } + + counts_inc_normalised_atomic(h, counts_index, count); + update_min_max_atomic(h, value); + + return true; +} + +bool hdr_record_corrected_value(struct hdr_histogram* h, int64_t value, int64_t expected_interval) +{ + return hdr_record_corrected_values(h, value, 1, expected_interval); +} + +bool hdr_record_corrected_value_atomic(struct hdr_histogram* h, int64_t value, int64_t expected_interval) +{ + return hdr_record_corrected_values_atomic(h, value, 1, expected_interval); +} + +bool hdr_record_corrected_values(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval) +{ + int64_t missing_value; + + if (!hdr_record_values(h, value, count)) + { + return false; + } + + if (expected_interval <= 0 || value <= expected_interval) + { + return true; + } + + missing_value = value - expected_interval; + for (; missing_value >= expected_interval; missing_value -= expected_interval) + { + if (!hdr_record_values(h, missing_value, count)) + { + return false; + } + } + + return true; +} + +bool hdr_record_corrected_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval) +{ + int64_t missing_value; + + if (!hdr_record_values_atomic(h, value, count)) + { + return false; + } + + if (expected_interval <= 0 || value <= expected_interval) + { + return true; + } + + missing_value = value - expected_interval; + for (; missing_value >= expected_interval; missing_value -= expected_interval) + { + if (!hdr_record_values_atomic(h, missing_value, count)) + { + return false; + } + } + + return true; +} + +int64_t hdr_add(struct hdr_histogram* h, const struct hdr_histogram* from) +{ + struct hdr_iter iter; + int64_t dropped = 0; + hdr_iter_recorded_init(&iter, from); + + while (hdr_iter_next(&iter)) + { + int64_t value = iter.value; + int64_t count = iter.count; + + if (!hdr_record_values(h, value, count)) + { + dropped += count; + } + } + + return dropped; +} + +int64_t hdr_add_while_correcting_for_coordinated_omission( + struct 
hdr_histogram* h, struct hdr_histogram* from, int64_t expected_interval) +{ + struct hdr_iter iter; + int64_t dropped = 0; + hdr_iter_recorded_init(&iter, from); + + while (hdr_iter_next(&iter)) + { + int64_t value = iter.value; + int64_t count = iter.count; + + if (!hdr_record_corrected_values(h, value, count, expected_interval)) + { + dropped += count; + } + } + + return dropped; +} + + + +/* ## ## ### ## ## ## ######## ###### */ +/* ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ###### ###### */ +/* ## ## ######### ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## */ +/* ### ## ## ######## ####### ######## ###### */ + + +int64_t hdr_max(const struct hdr_histogram* h) +{ + if (0 == h->max_value) + { + return 0; + } + + return highest_equivalent_value(h, h->max_value); +} + +int64_t hdr_min(const struct hdr_histogram* h) +{ + if (0 < hdr_count_at_index(h, 0)) + { + return 0; + } + + return non_zero_min(h); +} + +static int64_t get_value_from_idx_up_to_count(const struct hdr_histogram* h, int64_t count_at_percentile) +{ + int64_t count_to_idx = 0; + + count_at_percentile = 0 < count_at_percentile ? count_at_percentile : 1; + for (int32_t idx = 0; idx < h->counts_len; idx++) + { + count_to_idx += h->counts[idx]; + if (count_to_idx >= count_at_percentile) + { + return hdr_value_at_index(h, idx); + } + } + + return 0; +} + + +int64_t hdr_value_at_percentile(const struct hdr_histogram* h, double percentile) +{ + double requested_percentile = percentile < 100.0 ? percentile : 100.0; + int64_t count_at_percentile = + (int64_t) (((requested_percentile / 100) * h->total_count) + 0.5); + int64_t value_from_idx = get_value_from_idx_up_to_count(h, count_at_percentile); + if (percentile == 0.0) + { + return lowest_equivalent_value(h, value_from_idx); + } + return highest_equivalent_value(h, value_from_idx); +} + +int hdr_value_at_percentiles(const struct hdr_histogram *h, const double *percentiles, int64_t *values, size_t length) +{ + if (NULL == percentiles || NULL == values) + { + return EINVAL; + } + + struct hdr_iter iter; + const int64_t total_count = h->total_count; + // to avoid allocations we use the values array for intermediate computation + // i.e. to store the expected cumulative count at each percentile + for (size_t i = 0; i < length; i++) + { + const double requested_percentile = percentiles[i] < 100.0 ? percentiles[i] : 100.0; + const int64_t count_at_percentile = + (int64_t) (((requested_percentile / 100) * total_count) + 0.5); + values[i] = count_at_percentile > 1 ? 
count_at_percentile : 1; + } + + hdr_iter_init(&iter, h); + int64_t total = 0; + size_t at_pos = 0; + while (hdr_iter_next(&iter) && at_pos < length) + { + total += iter.count; + while (at_pos < length && total >= values[at_pos]) + { + values[at_pos] = highest_equivalent_value(h, iter.value); + at_pos++; + } + } + return 0; +} + +double hdr_mean(const struct hdr_histogram* h) +{ + struct hdr_iter iter; + int64_t total = 0; + + hdr_iter_init(&iter, h); + + while (hdr_iter_next(&iter)) + { + if (0 != iter.count) + { + total += iter.count * hdr_median_equivalent_value(h, iter.value); + } + } + + return (total * 1.0) / h->total_count; +} + +double hdr_stddev(const struct hdr_histogram* h) +{ + double mean = hdr_mean(h); + double geometric_dev_total = 0.0; + + struct hdr_iter iter; + hdr_iter_init(&iter, h); + + while (hdr_iter_next(&iter)) + { + if (0 != iter.count) + { + double dev = (hdr_median_equivalent_value(h, iter.value) * 1.0) - mean; + geometric_dev_total += (dev * dev) * iter.count; + } + } + + return sqrt(geometric_dev_total / h->total_count); +} + +bool hdr_values_are_equivalent(const struct hdr_histogram* h, int64_t a, int64_t b) +{ + return lowest_equivalent_value(h, a) == lowest_equivalent_value(h, b); +} + +int64_t hdr_lowest_equivalent_value(const struct hdr_histogram* h, int64_t value) +{ + return lowest_equivalent_value(h, value); +} + +int64_t hdr_count_at_value(const struct hdr_histogram* h, int64_t value) +{ + return counts_get_normalised(h, counts_index_for(h, value)); +} + +int64_t hdr_count_at_index(const struct hdr_histogram* h, int32_t index) +{ + return counts_get_normalised(h, index); +} + + +/* #### ######## ######## ######## ### ######## ####### ######## ###### */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ###### ######## ## ## ## ## ## ######## ###### */ +/* ## ## ## ## ## ######### ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* #### ## ######## ## ## ## ## ## ####### ## ## ###### */ + + +static bool has_buckets(struct hdr_iter* iter) +{ + return iter->counts_index < iter->h->counts_len; +} + +static bool has_next(struct hdr_iter* iter) +{ + return iter->cumulative_count < iter->total_count; +} + +static bool move_next(struct hdr_iter* iter) +{ + iter->counts_index++; + + if (!has_buckets(iter)) + { + return false; + } + + iter->count = counts_get_normalised(iter->h, iter->counts_index); + iter->cumulative_count += iter->count; + const int64_t value = hdr_value_at_index(iter->h, iter->counts_index); + const int32_t bucket_index = get_bucket_index(iter->h, value); + const int32_t sub_bucket_index = get_sub_bucket_index(value, bucket_index, iter->h->unit_magnitude); + const int64_t leq = lowest_equivalent_value_given_bucket_indices(iter->h, bucket_index, sub_bucket_index); + const int64_t size_of_equivalent_value_range = size_of_equivalent_value_range_given_bucket_indices( + iter->h, bucket_index, sub_bucket_index); + iter->lowest_equivalent_value = leq; + iter->value = value; + iter->highest_equivalent_value = leq + size_of_equivalent_value_range - 1; + iter->median_equivalent_value = leq + (size_of_equivalent_value_range >> 1); + + return true; +} + +static int64_t peek_next_value_from_index(struct hdr_iter* iter) +{ + return hdr_value_at_index(iter->h, iter->counts_index + 1); +} + +static bool next_value_greater_than_reporting_level_upper_bound( + struct hdr_iter *iter, int64_t reporting_level_upper_bound) +{ + if (iter->counts_index >= iter->h->counts_len) + { + return 
false; + } + + return peek_next_value_from_index(iter) > reporting_level_upper_bound; +} + +static bool basic_iter_next(struct hdr_iter *iter) +{ + if (!has_next(iter) || iter->counts_index >= iter->h->counts_len) + { + return false; + } + + move_next(iter); + + return true; +} + +static void update_iterated_values(struct hdr_iter* iter, int64_t new_value_iterated_to) +{ + iter->value_iterated_from = iter->value_iterated_to; + iter->value_iterated_to = new_value_iterated_to; +} + +static bool all_values_iter_next(struct hdr_iter* iter) +{ + bool result = move_next(iter); + + if (result) + { + update_iterated_values(iter, iter->value); + } + + return result; +} + +void hdr_iter_init(struct hdr_iter* iter, const struct hdr_histogram* h) +{ + iter->h = h; + + iter->counts_index = -1; + iter->total_count = h->total_count; + iter->count = 0; + iter->cumulative_count = 0; + iter->value = 0; + iter->highest_equivalent_value = 0; + iter->value_iterated_from = 0; + iter->value_iterated_to = 0; + + iter->_next_fp = all_values_iter_next; +} + +bool hdr_iter_next(struct hdr_iter* iter) +{ + return iter->_next_fp(iter); +} + +/* ######## ######## ######## ###### ######## ## ## ######## #### ## ######## ###### */ +/* ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## #### ## ## ## ## ## ## */ +/* ######## ###### ######## ## ###### ## ## ## ## ## ## ###### ###### */ +/* ## ## ## ## ## ## ## #### ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ### ## ## ## ## ## ## */ +/* ## ######## ## ## ###### ######## ## ## ## #### ######## ######## ###### */ + +static bool percentile_iter_next(struct hdr_iter* iter) +{ + int64_t temp, half_distance, percentile_reporting_ticks; + + struct hdr_iter_percentiles* percentiles = &iter->specifics.percentiles; + + if (!has_next(iter)) + { + if (percentiles->seen_last_value) + { + return false; + } + + percentiles->seen_last_value = true; + percentiles->percentile = 100.0; + + return true; + } + + if (iter->counts_index == -1 && !basic_iter_next(iter)) + { + return false; + } + + do + { + double current_percentile = (100.0 * (double) iter->cumulative_count) / iter->h->total_count; + if (iter->count != 0 && + percentiles->percentile_to_iterate_to <= current_percentile) + { + update_iterated_values(iter, highest_equivalent_value(iter->h, iter->value)); + + percentiles->percentile = percentiles->percentile_to_iterate_to; + temp = (int64_t)(log(100 / (100.0 - (percentiles->percentile_to_iterate_to))) / log(2)) + 1; + half_distance = (int64_t) pow(2, (double) temp); + percentile_reporting_ticks = percentiles->ticks_per_half_distance * half_distance; + percentiles->percentile_to_iterate_to += 100.0 / percentile_reporting_ticks; + + return true; + } + } + while (basic_iter_next(iter)); + + return true; +} + +void hdr_iter_percentile_init(struct hdr_iter* iter, const struct hdr_histogram* h, int32_t ticks_per_half_distance) +{ + iter->h = h; + + hdr_iter_init(iter, h); + + iter->specifics.percentiles.seen_last_value = false; + iter->specifics.percentiles.ticks_per_half_distance = ticks_per_half_distance; + iter->specifics.percentiles.percentile_to_iterate_to = 0.0; + iter->specifics.percentiles.percentile = 0.0; + + iter->_next_fp = percentile_iter_next; +} + +static void format_line_string(char* str, size_t len, int significant_figures, format_type format) +{ +#if defined(_MSC_VER) +#define snprintf _snprintf +#pragma warning(push) +#pragma warning(disable: 4996) +#endif + const char* format_str = "%s%d%s"; + + switch (format) + { + case CSV: + snprintf(str, 
len, format_str, "%.", significant_figures, "f,%f,%d,%.2f\n"); + break; + case CLASSIC: + snprintf(str, len, format_str, "%12.", significant_figures, "f %12f %12d %12.2f\n"); + break; + default: + snprintf(str, len, format_str, "%12.", significant_figures, "f %12f %12d %12.2f\n"); + } +#if defined(_MSC_VER) +#undef snprintf +#pragma warning(pop) +#endif +} + + +/* ######## ######## ###### ####### ######## ######## ######## ######## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ######## ###### ## ## ## ######## ## ## ###### ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ######## ###### ####### ## ## ######## ######## ######## */ + + +static bool recorded_iter_next(struct hdr_iter* iter) +{ + while (basic_iter_next(iter)) + { + if (iter->count != 0) + { + update_iterated_values(iter, iter->value); + + iter->specifics.recorded.count_added_in_this_iteration_step = iter->count; + return true; + } + } + + return false; +} + +void hdr_iter_recorded_init(struct hdr_iter* iter, const struct hdr_histogram* h) +{ + hdr_iter_init(iter, h); + + iter->specifics.recorded.count_added_in_this_iteration_step = 0; + + iter->_next_fp = recorded_iter_next; +} + +/* ## #### ## ## ######## ### ######## */ +/* ## ## ### ## ## ## ## ## ## */ +/* ## ## #### ## ## ## ## ## ## */ +/* ## ## ## ## ## ###### ## ## ######## */ +/* ## ## ## #### ## ######### ## ## */ +/* ## ## ## ### ## ## ## ## ## */ +/* ######## #### ## ## ######## ## ## ## ## */ + + +static bool iter_linear_next(struct hdr_iter* iter) +{ + struct hdr_iter_linear* linear = &iter->specifics.linear; + + linear->count_added_in_this_iteration_step = 0; + + if (has_next(iter) || + next_value_greater_than_reporting_level_upper_bound( + iter, linear->next_value_reporting_level_lowest_equivalent)) + { + do + { + if (iter->value >= linear->next_value_reporting_level_lowest_equivalent) + { + update_iterated_values(iter, linear->next_value_reporting_level); + + linear->next_value_reporting_level += linear->value_units_per_bucket; + linear->next_value_reporting_level_lowest_equivalent = + lowest_equivalent_value(iter->h, linear->next_value_reporting_level); + + return true; + } + + if (!move_next(iter)) + { + return true; + } + + linear->count_added_in_this_iteration_step += iter->count; + } + while (true); + } + + return false; +} + + +void hdr_iter_linear_init(struct hdr_iter* iter, const struct hdr_histogram* h, int64_t value_units_per_bucket) +{ + hdr_iter_init(iter, h); + + iter->specifics.linear.count_added_in_this_iteration_step = 0; + iter->specifics.linear.value_units_per_bucket = value_units_per_bucket; + iter->specifics.linear.next_value_reporting_level = value_units_per_bucket; + iter->specifics.linear.next_value_reporting_level_lowest_equivalent = lowest_equivalent_value(h, value_units_per_bucket); + + iter->_next_fp = iter_linear_next; +} + +void hdr_iter_linear_set_value_units_per_bucket(struct hdr_iter* iter, int64_t value_units_per_bucket) +{ + iter->specifics.linear.value_units_per_bucket = value_units_per_bucket; +} + +/* ## ####### ###### ### ######## #### ######## ## ## ## ## #### ###### */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ### ### ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## #### #### ## ## */ +/* ## ## ## ## #### ## ## ######## ## ## ######### ## ### ## ## ## */ +/* ## ## ## ## ## ######### ## ## ## ## ## ## ## ## ## ## */ +/* ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## */ +/* ######## ####### 
###### ## ## ## ## #### ## ## ## ## ## #### ###### */ + +static bool log_iter_next(struct hdr_iter *iter) +{ + struct hdr_iter_log* logarithmic = &iter->specifics.log; + + logarithmic->count_added_in_this_iteration_step = 0; + + if (has_next(iter) || + next_value_greater_than_reporting_level_upper_bound( + iter, logarithmic->next_value_reporting_level_lowest_equivalent)) + { + do + { + if (iter->value >= logarithmic->next_value_reporting_level_lowest_equivalent) + { + update_iterated_values(iter, logarithmic->next_value_reporting_level); + + logarithmic->next_value_reporting_level *= (int64_t)logarithmic->log_base; + logarithmic->next_value_reporting_level_lowest_equivalent = lowest_equivalent_value(iter->h, logarithmic->next_value_reporting_level); + + return true; + } + + if (!move_next(iter)) + { + return true; + } + + logarithmic->count_added_in_this_iteration_step += iter->count; + } + while (true); + } + + return false; +} + +void hdr_iter_log_init( + struct hdr_iter* iter, + const struct hdr_histogram* h, + int64_t value_units_first_bucket, + double log_base) +{ + hdr_iter_init(iter, h); + iter->specifics.log.count_added_in_this_iteration_step = 0; + iter->specifics.log.log_base = log_base; + iter->specifics.log.next_value_reporting_level = value_units_first_bucket; + iter->specifics.log.next_value_reporting_level_lowest_equivalent = lowest_equivalent_value(h, value_units_first_bucket); + + iter->_next_fp = log_iter_next; +} + +/* Printing. */ + +static const char* format_head_string(format_type format) +{ + switch (format) + { + case CSV: + return "%s,%s,%s,%s\n"; + case CLASSIC: + default: + return "%12s %12s %12s %12s\n\n"; + } +} + +static const char CLASSIC_FOOTER[] = + "#[Mean = %12.3f, StdDeviation = %12.3f]\n" + "#[Max = %12.3f, Total count = %12" PRIu64 "]\n" + "#[Buckets = %12d, SubBuckets = %12d]\n"; + +int hdr_percentiles_print( + struct hdr_histogram* h, FILE* stream, int32_t ticks_per_half_distance, + double value_scale, format_type format) +{ + char line_format[25]; + const char* head_format; + int rc = 0; + struct hdr_iter iter; + struct hdr_iter_percentiles * percentiles; + + format_line_string(line_format, 25, h->significant_figures, format); + head_format = format_head_string(format); + + hdr_iter_percentile_init(&iter, h, ticks_per_half_distance); + + if (fprintf( + stream, head_format, + "Value", "Percentile", "TotalCount", "1/(1-Percentile)") < 0) + { + rc = EIO; + goto cleanup; + } + + percentiles = &iter.specifics.percentiles; + while (hdr_iter_next(&iter)) + { + double value = iter.highest_equivalent_value / value_scale; + double percentile = percentiles->percentile / 100.0; + int64_t total_count = iter.cumulative_count; + double inverted_percentile = (1.0 / (1.0 - percentile)); + + if (fprintf( + stream, line_format, value, percentile, total_count, inverted_percentile) < 0) + { + rc = EIO; + goto cleanup; + } + } + + if (CLASSIC == format) + { + double mean = hdr_mean(h) / value_scale; + double stddev = hdr_stddev(h) / value_scale; + double max = hdr_max(h) / value_scale; + + if (fprintf( + stream, CLASSIC_FOOTER, mean, stddev, max, + h->total_count, h->bucket_count, h->sub_bucket_count) < 0) + { + rc = EIO; + goto cleanup; + } + } + + cleanup: + return rc; +} diff --git a/deps/hdr_histogram/hdr_histogram.h b/deps/hdr_histogram/hdr_histogram.h new file mode 100644 index 0000000..01b1e08 --- /dev/null +++ b/deps/hdr_histogram/hdr_histogram.h @@ -0,0 +1,521 @@ +/** + * hdr_histogram.h + * Written by Michael Barker and released to the public domain, + * as 
explained at http://creativecommons.org/publicdomain/zero/1.0/ + * + * The source for the hdr_histogram utilises a few C99 constructs, specifically + * the use of stdint/stdbool and inline variable declaration. + */ + +#ifndef HDR_HISTOGRAM_H +#define HDR_HISTOGRAM_H 1 + +#include +#include +#include + +struct hdr_histogram +{ + int64_t lowest_discernible_value; + int64_t highest_trackable_value; + int32_t unit_magnitude; + int32_t significant_figures; + int32_t sub_bucket_half_count_magnitude; + int32_t sub_bucket_half_count; + int64_t sub_bucket_mask; + int32_t sub_bucket_count; + int32_t bucket_count; + int64_t min_value; + int64_t max_value; + int32_t normalizing_index_offset; + double conversion_ratio; + int32_t counts_len; + int64_t total_count; + int64_t* counts; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Allocate the memory and initialise the hdr_histogram. + * + * Due to the size of the histogram being the result of some reasonably + * involved math on the input parameters this function it is tricky to stack allocate. + * The histogram should be released with hdr_close + * + * @param lowest_discernible_value The smallest possible value that is distinguishable from 0. + * Must be a positive integer that is >= 1. May be internally rounded down to nearest power of 2. + * @param highest_trackable_value The largest possible value to be put into the + * histogram. + * @param significant_figures The level of precision for this histogram, i.e. the number + * of figures in a decimal number that will be maintained. E.g. a value of 3 will mean + * the results from the histogram will be accurate up to the first three digits. Must + * be a value between 1 and 5 (inclusive). + * @param result Output parameter to capture allocated histogram. + * @return 0 on success, EINVAL if lowest_discernible_value is < 1 or the + * significant_figure value is outside of the allowed range, ENOMEM if malloc + * failed. + */ +int hdr_init( + int64_t lowest_discernible_value, + int64_t highest_trackable_value, + int significant_figures, + struct hdr_histogram** result); + +/** + * Free the memory and close the hdr_histogram. + * + * @param h The histogram you want to close. + */ +void hdr_close(struct hdr_histogram* h); + +/** + * Allocate the memory and initialise the hdr_histogram. This is the equivalent of calling + * hdr_init(1, highest_trackable_value, significant_figures, result); + * + * @deprecated use hdr_init. + */ +int hdr_alloc(int64_t highest_trackable_value, int significant_figures, struct hdr_histogram** result); + + +/** + * Reset a histogram to zero - empty out a histogram and re-initialise it + * + * If you want to re-use an existing histogram, but reset everything back to zero, this + * is the routine to use. + * + * @param h The histogram you want to reset to empty. + * + */ +void hdr_reset(struct hdr_histogram* h); + +/** + * Get the memory size of the hdr_histogram. + * + * @param h "This" pointer + * @return The amount of memory used by the hdr_histogram in bytes + */ +size_t hdr_get_memory_size(struct hdr_histogram* h); + +/** + * Records a value in the histogram, will round this value of to a precision at or better + * than the significant_figure specified at construction time. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. 
+ */ +bool hdr_record_value(struct hdr_histogram* h, int64_t value); + +/** + * Records a value in the histogram, will round this value of to a precision at or better + * than the significant_figure specified at construction time. + * + * Will record this value atomically, however the whole structure may appear inconsistent + * when read concurrently with this update. Do NOT mix calls to this method with calls + * to non-atomic updates. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_value_atomic(struct hdr_histogram* h, int64_t value); + +/** + * Records count values in the histogram, will round this value of to a + * precision at or better than the significant_figure specified at construction + * time. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @param count Number of 'value's to add to the histogram + * @return false if any value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_values(struct hdr_histogram* h, int64_t value, int64_t count); + +/** + * Records count values in the histogram, will round this value of to a + * precision at or better than the significant_figure specified at construction + * time. + * + * Will record this value atomically, however the whole structure may appear inconsistent + * when read concurrently with this update. Do NOT mix calls to this method with calls + * to non-atomic updates. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @param count Number of 'value's to add to the histogram + * @return false if any value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count); + +/** + * Record a value in the histogram and backfill based on an expected interval. + * + * Records a value in the histogram, will round this value of to a precision at or better + * than the significant_figure specified at construction time. This is specifically used + * for recording latency. If the value is larger than the expected_interval then the + * latency recording system has experienced co-ordinated omission. This method fills in the + * values that would have occurred had the client providing the load not been blocked. + + * @param h "This" pointer + * @param value Value to add to the histogram + * @param expected_interval The delay between recording values. + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_corrected_value(struct hdr_histogram* h, int64_t value, int64_t expected_interval); + +/** + * Record a value in the histogram and backfill based on an expected interval. + * + * Records a value in the histogram, will round this value of to a precision at or better + * than the significant_figure specified at construction time. This is specifically used + * for recording latency. If the value is larger than the expected_interval then the + * latency recording system has experienced co-ordinated omission. This method fills in the + * values that would have occurred had the client providing the load not been blocked. + * + * Will record this value atomically, however the whole structure may appear inconsistent + * when read concurrently with this update. 
Do NOT mix calls to this method with calls + * to non-atomic updates. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @param expected_interval The delay between recording values. + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_corrected_value_atomic(struct hdr_histogram* h, int64_t value, int64_t expected_interval); + +/** + * Record a value in the histogram 'count' times. Applies the same correcting logic + * as 'hdr_record_corrected_value'. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @param count Number of 'value's to add to the histogram + * @param expected_interval The delay between recording values. + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_corrected_values(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval); + +/** + * Record a value in the histogram 'count' times. Applies the same correcting logic + * as 'hdr_record_corrected_value'. + * + * Will record this value atomically, however the whole structure may appear inconsistent + * when read concurrently with this update. Do NOT mix calls to this method with calls + * to non-atomic updates. + * + * @param h "This" pointer + * @param value Value to add to the histogram + * @param count Number of 'value's to add to the histogram + * @param expected_interval The delay between recording values. + * @return false if the value is larger than the highest_trackable_value and can't be recorded, + * true otherwise. + */ +bool hdr_record_corrected_values_atomic(struct hdr_histogram* h, int64_t value, int64_t count, int64_t expected_interval); + +/** + * Adds all of the values from 'from' to 'this' histogram. Will return the + * number of values that are dropped when copying. Values will be dropped + * if they around outside of h.lowest_discernible_value and + * h.highest_trackable_value. + * + * @param h "This" pointer + * @param from Histogram to copy values from. + * @return The number of values dropped when copying. + */ +int64_t hdr_add(struct hdr_histogram* h, const struct hdr_histogram* from); + +/** + * Adds all of the values from 'from' to 'this' histogram. Will return the + * number of values that are dropped when copying. Values will be dropped + * if they around outside of h.lowest_discernible_value and + * h.highest_trackable_value. + * + * @param h "This" pointer + * @param from Histogram to copy values from. + * @return The number of values dropped when copying. + */ +int64_t hdr_add_while_correcting_for_coordinated_omission( + struct hdr_histogram* h, struct hdr_histogram* from, int64_t expected_interval); + +/** + * Get minimum value from the histogram. Will return 2^63-1 if the histogram + * is empty. + * + * @param h "This" pointer + */ +int64_t hdr_min(const struct hdr_histogram* h); + +/** + * Get maximum value from the histogram. Will return 0 if the histogram + * is empty. + * + * @param h "This" pointer + */ +int64_t hdr_max(const struct hdr_histogram* h); + +/** + * Get the value at a specific percentile. + * + * @param h "This" pointer. + * @param percentile The percentile to get the value for + */ +int64_t hdr_value_at_percentile(const struct hdr_histogram* h, double percentile); + +/** + * Get the values at the given percentiles. + * + * @param h "This" pointer. 
+ * @param percentiles The ordered percentiles array to get the values for. + * @param length Number of elements in the arrays. + * @param values Destination array containing the values at the given percentiles. + * The values array should be allocated by the caller. + * @return 0 on success, ENOMEM if the provided destination array is null. + */ +int hdr_value_at_percentiles(const struct hdr_histogram *h, const double *percentiles, int64_t *values, size_t length); + +/** + * Gets the standard deviation for the values in the histogram. + * + * @param h "This" pointer + * @return The standard deviation + */ +double hdr_stddev(const struct hdr_histogram* h); + +/** + * Gets the mean for the values in the histogram. + * + * @param h "This" pointer + * @return The mean + */ +double hdr_mean(const struct hdr_histogram* h); + +/** + * Determine if two values are equivalent with the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param h "This" pointer + * @param a first value to compare + * @param b second value to compare + * @return 'true' if values are equivalent with the histogram's resolution. + */ +bool hdr_values_are_equivalent(const struct hdr_histogram* h, int64_t a, int64_t b); + +/** + * Get the lowest value that is equivalent to the given value within the histogram's resolution. + * Where "equivalent" means that value samples recorded for any two + * equivalent values are counted in a common total count. + * + * @param h "This" pointer + * @param value The given value + * @return The lowest value that is equivalent to the given value within the histogram's resolution. + */ +int64_t hdr_lowest_equivalent_value(const struct hdr_histogram* h, int64_t value); + +/** + * Get the count of recorded values at a specific value + * (to within the histogram resolution at the value level). + * + * @param h "This" pointer + * @param value The value for which to provide the recorded count + * @return The total count of values recorded in the histogram within the value range that is + * {@literal >=} lowestEquivalentValue(value) and {@literal <=} highestEquivalentValue(value) + */ +int64_t hdr_count_at_value(const struct hdr_histogram* h, int64_t value); + +int64_t hdr_count_at_index(const struct hdr_histogram* h, int32_t index); + +int64_t hdr_value_at_index(const struct hdr_histogram* h, int32_t index); + +struct hdr_iter_percentiles +{ + bool seen_last_value; + int32_t ticks_per_half_distance; + double percentile_to_iterate_to; + double percentile; +}; + +struct hdr_iter_recorded +{ + int64_t count_added_in_this_iteration_step; +}; + +struct hdr_iter_linear +{ + int64_t value_units_per_bucket; + int64_t count_added_in_this_iteration_step; + int64_t next_value_reporting_level; + int64_t next_value_reporting_level_lowest_equivalent; +}; + +struct hdr_iter_log +{ + double log_base; + int64_t count_added_in_this_iteration_step; + int64_t next_value_reporting_level; + int64_t next_value_reporting_level_lowest_equivalent; +}; + +/** + * The basic iterator. This is a generic structure + * that supports all of the types of iteration. Use + * the appropriate initialiser to get the desired + * iteration. 
+ *
+ * @
+ */
+struct hdr_iter
+{
+    const struct hdr_histogram* h;
+    /** raw index into the counts array */
+    int32_t counts_index;
+    /** snapshot of the length at the time the iterator is created */
+    int64_t total_count;
+    /** value directly from array for the current counts_index */
+    int64_t count;
+    /** sum of all of the counts up to and including the count at this index */
+    int64_t cumulative_count;
+    /** The current value based on counts_index */
+    int64_t value;
+    int64_t highest_equivalent_value;
+    int64_t lowest_equivalent_value;
+    int64_t median_equivalent_value;
+    int64_t value_iterated_from;
+    int64_t value_iterated_to;
+
+    union
+    {
+        struct hdr_iter_percentiles percentiles;
+        struct hdr_iter_recorded recorded;
+        struct hdr_iter_linear linear;
+        struct hdr_iter_log log;
+    } specifics;
+
+    bool (* _next_fp)(struct hdr_iter* iter);
+
+};
+
+/**
+ * Initialises the basic iterator.
+ *
+ * @param iter 'This' pointer
+ * @param h The histogram to iterate over
+ */
+void hdr_iter_init(struct hdr_iter* iter, const struct hdr_histogram* h);
+
+/**
+ * Initialise the iterator for use with percentiles.
+ */
+void hdr_iter_percentile_init(struct hdr_iter* iter, const struct hdr_histogram* h, int32_t ticks_per_half_distance);
+
+/**
+ * Initialise the iterator for use with recorded values.
+ */
+void hdr_iter_recorded_init(struct hdr_iter* iter, const struct hdr_histogram* h);
+
+/**
+ * Initialise the iterator for use with linear values.
+ */
+void hdr_iter_linear_init(
+    struct hdr_iter* iter,
+    const struct hdr_histogram* h,
+    int64_t value_units_per_bucket);
+
+/**
+ * Update the iterator value units per bucket
+ */
+void hdr_iter_linear_set_value_units_per_bucket(struct hdr_iter* iter, int64_t value_units_per_bucket);
+
+/**
+ * Initialise the iterator for use with logarithmic values
+ */
+void hdr_iter_log_init(
+    struct hdr_iter* iter,
+    const struct hdr_histogram* h,
+    int64_t value_units_first_bucket,
+    double log_base);
+
+/**
+ * Iterate to the next value for the iterator. If there are no more values
+ * available return false.
+ *
+ * @param iter 'This' pointer
+ * @return 'false' if there are no values remaining for this iterator.
+ */
+bool hdr_iter_next(struct hdr_iter* iter);
+
+typedef enum
+{
+    CLASSIC,
+    CSV
+} format_type;
+
+/**
+ * Print out a percentile based histogram to the supplied stream. Note that
+ * this call will not flush the FILE, this is left up to the user.
+ *
+ * @param h 'This' pointer
+ * @param stream The FILE to write the output to
+ * @param ticks_per_half_distance The number of iteration steps per half-distance to 100%
+ * @param value_scale Scale the output values by this amount
+ * @param format_type Format to use, e.g. CSV.
+ * @return 0 on success, error code on failure. EIO if an error occurs writing
+ * the output.
+ */
+int hdr_percentiles_print(
+    struct hdr_histogram* h, FILE* stream, int32_t ticks_per_half_distance,
+    double value_scale, format_type format);
+
+/**
+* Internal allocation methods, used by hdr_dbl_histogram.
+*/ +struct hdr_histogram_bucket_config +{ + int64_t lowest_discernible_value; + int64_t highest_trackable_value; + int64_t unit_magnitude; + int64_t significant_figures; + int32_t sub_bucket_half_count_magnitude; + int32_t sub_bucket_half_count; + int64_t sub_bucket_mask; + int32_t sub_bucket_count; + int32_t bucket_count; + int32_t counts_len; +}; + +int hdr_calculate_bucket_config( + int64_t lowest_discernible_value, + int64_t highest_trackable_value, + int significant_figures, + struct hdr_histogram_bucket_config* cfg); + +void hdr_init_preallocated(struct hdr_histogram* h, struct hdr_histogram_bucket_config* cfg); + +int64_t hdr_size_of_equivalent_value_range(const struct hdr_histogram* h, int64_t value); + +int64_t hdr_next_non_equivalent_value(const struct hdr_histogram* h, int64_t value); + +int64_t hdr_median_equivalent_value(const struct hdr_histogram* h, int64_t value); + +/** + * Used to reset counters after importing data manually into the histogram, used by the logging code + * and other custom serialisation tools. + */ +void hdr_reset_internal_counters(struct hdr_histogram* h); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/deps/hdr_histogram/hdr_redis_malloc.h b/deps/hdr_histogram/hdr_redis_malloc.h new file mode 100644 index 0000000..d9401ca --- /dev/null +++ b/deps/hdr_histogram/hdr_redis_malloc.h @@ -0,0 +1,13 @@ +#ifndef HDR_MALLOC_H__ +#define HDR_MALLOC_H__ + +void *zmalloc(size_t size); +void *zcalloc_num(size_t num, size_t size); +void *zrealloc(void *ptr, size_t size); +void zfree(void *ptr); + +#define hdr_malloc zmalloc +#define hdr_calloc zcalloc_num +#define hdr_realloc zrealloc +#define hdr_free zfree +#endif diff --git a/deps/hdr_histogram/hdr_tests.h b/deps/hdr_histogram/hdr_tests.h new file mode 100644 index 0000000..c016d3a --- /dev/null +++ b/deps/hdr_histogram/hdr_tests.h @@ -0,0 +1,22 @@ +#ifndef HDR_TESTS_H +#define HDR_TESTS_H + +/* These are functions used in tests and are not intended for normal usage. */ + +#include "hdr_histogram.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t counts_index_for(const struct hdr_histogram* h, int64_t value); +int hdr_encode_compressed(struct hdr_histogram* h, uint8_t** compressed_histogram, size_t* compressed_len); +int hdr_decode_compressed(uint8_t* buffer, size_t length, struct hdr_histogram** histogram); +void hdr_base64_decode_block(const char* input, uint8_t* output); +void hdr_base64_encode_block(const uint8_t* input, char* output); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/deps/hiredis/.github/workflows/build.yml b/deps/hiredis/.github/workflows/build.yml new file mode 100644 index 0000000..362bc77 --- /dev/null +++ b/deps/hiredis/.github/workflows/build.yml @@ -0,0 +1,205 @@ +name: Build and test +on: [push, pull_request] + +jobs: + ubuntu: + name: Ubuntu + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + sudo add-apt-repository -y ppa:chris-lea/redis-server + sudo apt-get update + sudo apt-get install -y redis-server valgrind libevent-dev + + - name: Build using cmake + env: + EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON + CFLAGS: -Werror + CXXFLAGS: -Werror + run: mkdir build-ubuntu && cd build-ubuntu && cmake .. 
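For orientation, the following sketch shows how the hdr_histogram recording, percentile and iterator functions documented in hdr_histogram.h above fit together. It is not part of the upstream patch: hdr_init and hdr_close are assumed from the earlier portion of that header (not shown here), and the value range, significant figures and recorded numbers are placeholder choices.

```c
#include <stdio.h>
#include <stdint.h>
#include "hdr_histogram.h"

int main(void)
{
    struct hdr_histogram* h = NULL;

    /* Assumed allocator from the same header: track 1..3,600,000,000
     * (e.g. microseconds up to one hour) at 3 significant figures. */
    if (hdr_init(1, INT64_C(3600000000), 3, &h) != 0) {
        return 1;
    }

    /* Plain recording; the *_atomic variants take the same arguments. */
    hdr_record_value(h, 42);
    hdr_record_values(h, 120, 10);

    /* Latency recording with back-filling for co-ordinated omission:
     * expected_interval is the intended delay between samples. */
    hdr_record_corrected_value(h, 5000, 1000);

    printf("p99  = %lld\n", (long long) hdr_value_at_percentile(h, 99.0));
    printf("mean = %f max = %lld\n", hdr_mean(h), (long long) hdr_max(h));

    /* Walk only the buckets that actually received values. */
    struct hdr_iter iter;
    hdr_iter_recorded_init(&iter, h);
    while (hdr_iter_next(&iter)) {
        printf("value=%lld count=%lld\n",
               (long long) iter.value, (long long) iter.count);
    }

    /* Percentile report on stdout (CLASSIC or CSV). */
    hdr_percentiles_print(h, stdout, 5, 1.0, CLASSIC);

    hdr_close(h); /* assumed deallocator from the same header */
    return 0;
}
```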
+ + - name: Build using makefile + run: USE_SSL=1 TEST_ASYNC=1 make + + - name: Run tests + env: + SKIPS_AS_FAILS: 1 + TEST_SSL: 1 + run: $GITHUB_WORKSPACE/test.sh + + # - name: Run tests under valgrind + # env: + # SKIPS_AS_FAILS: 1 + # TEST_PREFIX: valgrind --error-exitcode=99 --track-origins=yes --leak-check=full + # run: $GITHUB_WORKSPACE/test.sh + + centos7: + name: CentOS 7 + runs-on: ubuntu-latest + container: centos:7 + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + yum -y install http://rpms.remirepo.net/enterprise/remi-release-7.rpm + yum -y --enablerepo=remi install redis + yum -y install gcc gcc-c++ make openssl openssl-devel cmake3 valgrind libevent-devel + + - name: Build using cmake + env: + EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON + CFLAGS: -Werror + CXXFLAGS: -Werror + run: mkdir build-centos7 && cd build-centos7 && cmake3 .. + + - name: Build using Makefile + run: USE_SSL=1 TEST_ASYNC=1 make + + - name: Run tests + env: + SKIPS_AS_FAILS: 1 + TEST_SSL: 1 + run: $GITHUB_WORKSPACE/test.sh + + - name: Run tests under valgrind + env: + SKIPS_AS_FAILS: 1 + TEST_SSL: 1 + TEST_PREFIX: valgrind --error-exitcode=99 --track-origins=yes --leak-check=full + run: $GITHUB_WORKSPACE/test.sh + + centos8: + name: RockyLinux 8 + runs-on: ubuntu-latest + container: rockylinux:8 + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + dnf -y install https://rpms.remirepo.net/enterprise/remi-release-8.rpm + dnf -y module install redis:remi-6.0 + dnf -y group install "Development Tools" + dnf -y install openssl-devel cmake valgrind libevent-devel + + - name: Build using cmake + env: + EXTRA_CMAKE_OPTS: -DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON -DENABLE_ASYNC_TESTS:BOOL=ON + CFLAGS: -Werror + CXXFLAGS: -Werror + run: mkdir build-centos8 && cd build-centos8 && cmake .. + + - name: Build using Makefile + run: USE_SSL=1 TEST_ASYNC=1 make + + - name: Run tests + env: + SKIPS_AS_FAILS: 1 + TEST_SSL: 1 + run: $GITHUB_WORKSPACE/test.sh + + - name: Run tests under valgrind + env: + SKIPS_AS_FAILS: 1 + TEST_SSL: 1 + TEST_PREFIX: valgrind --error-exitcode=99 --track-origins=yes --leak-check=full + run: $GITHUB_WORKSPACE/test.sh + + freebsd: + runs-on: macos-10.15 + name: FreeBSD + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Build in FreeBSD + uses: vmactions/freebsd-vm@v0.1.5 + with: + prepare: pkg install -y gmake cmake + run: | + mkdir build && cd build && cmake .. && make && cd .. 
+ gmake + + macos: + name: macOS + runs-on: macos-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + brew install openssl redis + + - name: Build hiredis + run: USE_SSL=1 make + + - name: Run tests + env: + TEST_SSL: 1 + run: $GITHUB_WORKSPACE/test.sh + + windows: + name: Windows + runs-on: windows-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + choco install -y ninja memurai-developer + + - uses: ilammy/msvc-dev-cmd@v1 + - name: Build hiredis + run: | + mkdir build && cd build + cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_EXAMPLES=ON + ninja -v + + - name: Run tests + run: | + ./build/hiredis-test.exe + + - name: Setup cygwin + uses: egor-tensin/setup-cygwin@v3 + with: + platform: x64 + packages: make git gcc-core + + - name: Build in cygwin + env: + HIREDIS_PATH: ${{ github.workspace }} + run: | + build_hiredis() { + cd $(cygpath -u $HIREDIS_PATH) + git clean -xfd + make + } + build_hiredis + shell: C:\tools\cygwin\bin\bash.exe --login --norc -eo pipefail -o igncr '{0}' diff --git a/deps/hiredis/.gitignore b/deps/hiredis/.gitignore new file mode 100644 index 0000000..056959f --- /dev/null +++ b/deps/hiredis/.gitignore @@ -0,0 +1,9 @@ +/hiredis-test +/examples/hiredis-example* +/*.o +/*.so +/*.dylib +/*.a +/*.pc +*.dSYM +tags diff --git a/deps/hiredis/.travis.yml b/deps/hiredis/.travis.yml new file mode 100644 index 0000000..1e9b556 --- /dev/null +++ b/deps/hiredis/.travis.yml @@ -0,0 +1,125 @@ +language: c +compiler: + - gcc + - clang + +os: + - linux + - osx + +dist: bionic + +branches: + only: + - staging + - trying + - master + - /^release\/.*$/ + +install: + - if [ "$TRAVIS_COMPILER" != "mingw" ]; then + wget https://github.com/redis/redis/archive/6.0.6.tar.gz; + tar -xzvf 6.0.6.tar.gz; + pushd redis-6.0.6 && BUILD_TLS=yes make && export PATH=$PWD/src:$PATH && popd; + fi; + +before_script: + - if [ "$TRAVIS_OS_NAME" == "osx" ]; then + curl -O https://distfiles.macports.org/MacPorts/MacPorts-2.6.2-10.13-HighSierra.pkg; + sudo installer -pkg MacPorts-2.6.2-10.13-HighSierra.pkg -target /; + export PATH=$PATH:/opt/local/bin && sudo port -v selfupdate; + sudo port -N install openssl redis; + fi; + +addons: + apt: + packages: + - libc6-dbg + - libc6-dev + - libc6:i386 + - libc6-dev-i386 + - libc6-dbg:i386 + - gcc-multilib + - g++-multilib + - libssl-dev + - libssl-dev:i386 + - valgrind + +env: + - BITS="32" + - BITS="64" + +script: + - EXTRA_CMAKE_OPTS="-DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON -DENABLE_SSL_TESTS:BOOL=ON"; + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [ "$BITS" == "32" ]; then + CFLAGS="-m32 -Werror"; + CXXFLAGS="-m32 -Werror"; + LDFLAGS="-m32"; + EXTRA_CMAKE_OPTS=; + else + CFLAGS="-Werror"; + CXXFLAGS="-Werror"; + fi; + else + TEST_PREFIX="valgrind --track-origins=yes --leak-check=full"; + if [ "$BITS" == "32" ]; then + CFLAGS="-m32 -Werror"; + CXXFLAGS="-m32 -Werror"; + LDFLAGS="-m32"; + EXTRA_CMAKE_OPTS=; + else + CFLAGS="-Werror"; + CXXFLAGS="-Werror"; + fi; + fi; + export CFLAGS CXXFLAGS LDFLAGS TEST_PREFIX EXTRA_CMAKE_OPTS + - make && make clean; + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [ "$BITS" == "64" ]; then + OPENSSL_PREFIX="$(ls -d /usr/local/Cellar/openssl@1.1/*)" USE_SSL=1 make; + fi; + else + USE_SSL=1 make; + fi; + - mkdir build/ && cd build/ + 
- cmake .. ${EXTRA_CMAKE_OPTS} + - make VERBOSE=1 + - if [ "$BITS" == "64" ]; then + TEST_SSL=1 SKIPS_AS_FAILS=1 ctest -V; + else + SKIPS_AS_FAILS=1 ctest -V; + fi; + +jobs: + include: + # Windows MinGW cross compile on Linux + - os: linux + dist: xenial + compiler: mingw + addons: + apt: + packages: + - ninja-build + - gcc-mingw-w64-x86-64 + - g++-mingw-w64-x86-64 + script: + - mkdir build && cd build + - CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_BUILD_WITH_INSTALL_RPATH=on + - ninja -v + + # Windows MSVC 2017 + - os: windows + compiler: msvc + env: + - MATRIX_EVAL="CC=cl.exe && CXX=cl.exe" + before_install: + - eval "${MATRIX_EVAL}" + install: + - choco install ninja + - choco install -y memurai-developer + script: + - mkdir build && cd build + - cmd.exe //C 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvarsall.bat' amd64 '&&' + cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_EXAMPLES=ON '&&' ninja -v + - ./hiredis-test.exe diff --git a/deps/hiredis/CHANGELOG.md b/deps/hiredis/CHANGELOG.md new file mode 100644 index 0000000..2a2bc31 --- /dev/null +++ b/deps/hiredis/CHANGELOG.md @@ -0,0 +1,364 @@ +## [1.0.2](https://github.com/redis/hiredis/tree/v1.0.2) - (2021-10-07) + +Announcing Hiredis v1.0.2, which fixes CVE-2021-32765 but returns the SONAME to the correct value of `1.0.0`. + +- [Revert SONAME bump](https://github.com/redis/hiredis/commit/d4e6f109a064690cde64765c654e679fea1d3548) + ([Michael Grunder](https://github.com/michael-grunder)) + +## [1.0.1](https://github.com/redis/hiredis/tree/v1.0.1) - (2021-10-04) + +This release erroneously bumped the SONAME, please use [1.0.2](https://github.com/redis/hiredis/tree/v1.0.2) + +Announcing Hiredis v1.0.1, a security release fixing CVE-2021-32765 + +- Fix for [CVE-2021-32765](https://github.com/redis/hiredis/security/advisories/GHSA-hfm9-39pp-55p2) + [commit](https://github.com/redis/hiredis/commit/76a7b10005c70babee357a7d0f2becf28ec7ed1e) + ([Yossi Gottlieb](https://github.com/yossigo)) + +_Thanks to [Yossi Gottlieb](https://github.com/yossigo) for the security fix and to [Microsoft Security Vulnerability Research](https://www.microsoft.com/en-us/msrc/msvr) for finding the bug._ :sparkling_heart: + +## [1.0.0](https://github.com/redis/hiredis/tree/v1.0.0) - (2020-08-03) + +Announcing Hiredis v1.0.0, which adds support for RESP3, SSL connections, allocator injection, and better Windows support! :tada: + +_A big thanks to everyone who helped with this release. 
The following list includes everyone who contributed at least five lines, sorted by lines contributed._ :sparkling_heart: + +[Michael Grunder](https://github.com/michael-grunder), [Yossi Gottlieb](https://github.com/yossigo), +[Mark Nunberg](https://github.com/mnunberg), [Marcus Geelnard](https://github.com/mbitsnbites), +[Justin Brewer](https://github.com/justinbrewer), [Valentino Geron](https://github.com/valentinogeron), +[Minun Dragonation](https://github.com/dragonation), [Omri Steiner](https://github.com/OmriSteiner), +[Sangmoon Yi](https://github.com/jman-krafton), [Jinjiazh](https://github.com/jinjiazhang), +[Odin Hultgren Van Der Horst](https://github.com/Miniwoffer), [Muhammad Zahalqa](https://github.com/tryfinally), +[Nick Rivera](https://github.com/heronr), [Qi Yang](https://github.com/movebean), +[kevin1018](https://github.com/kevin1018) + +[Full Changelog](https://github.com/redis/hiredis/compare/v0.14.1...v1.0.0) + +**BREAKING CHANGES**: + +* `redisOptions` now has two timeout fields. One for connecting, and one for commands. If you're presently using `options->timeout` you will need to change it to use `options->connect_timeout`. (See [example](https://github.com/redis/hiredis/commit/38b5ae543f5c99eb4ccabbe277770fc6bc81226f#diff-86ba39d37aa829c8c82624cce4f049fbL36)) + +* Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now protocol errors. This is consistent + with the RESP specification. On 32-bit platforms, the upper bound is lowered to `SIZE_MAX`. + +* `redisReplyObjectFunctions.createArray` now takes `size_t` for its length parameter. + +**New features:** +- Support for RESP3 + [\#697](https://github.com/redis/hiredis/pull/697), + [\#805](https://github.com/redis/hiredis/pull/805), + [\#819](https://github.com/redis/hiredis/pull/819), + [\#841](https://github.com/redis/hiredis/pull/841) + ([Yossi Gottlieb](https://github.com/yossigo), [Michael Grunder](https://github.com/michael-grunder)) +- Support for SSL connections + [\#645](https://github.com/redis/hiredis/pull/645), + [\#699](https://github.com/redis/hiredis/pull/699), + [\#702](https://github.com/redis/hiredis/pull/702), + [\#708](https://github.com/redis/hiredis/pull/708), + [\#711](https://github.com/redis/hiredis/pull/711), + [\#821](https://github.com/redis/hiredis/pull/821), + [more](https://github.com/redis/hiredis/pulls?q=is%3Apr+is%3Amerged+SSL) + ([Mark Nunberg](https://github.com/mnunberg), [Yossi Gottlieb](https://github.com/yossigo)) +- Run-time allocator injection + [\#800](https://github.com/redis/hiredis/pull/800) + ([Michael Grunder](https://github.com/michael-grunder)) +- Improved Windows support (including MinGW and Windows CI) + [\#652](https://github.com/redis/hiredis/pull/652), + [\#663](https://github.com/redis/hiredis/pull/663) + ([Marcus Geelnard](https://www.bitsnbites.eu/author/m/)) +- Adds support for distinct connect and command timeouts + [\#839](https://github.com/redis/hiredis/pull/839), + [\#829](https://github.com/redis/hiredis/pull/829) + ([Valentino Geron](https://github.com/valentinogeron)) +- Add generic pointer and destructor to `redisContext` that users can use for context. 
+ [\#855](https://github.com/redis/hiredis/pull/855) + ([Michael Grunder](https://github.com/michael-grunder)) + +**Closed issues (that involved code changes):** + +- Makefile does not install TLS libraries [\#809](https://github.com/redis/hiredis/issues/809) +- redisConnectWithOptions should not set command timeout [\#722](https://github.com/redis/hiredis/issues/722), [\#829](https://github.com/redis/hiredis/pull/829) ([valentinogeron](https://github.com/valentinogeron)) +- Fix integer overflow in `sdsrange` [\#827](https://github.com/redis/hiredis/issues/827) +- INFO & CLUSTER commands failed when using RESP3 [\#802](https://github.com/redis/hiredis/issues/802) +- Windows compatibility patches [\#687](https://github.com/redis/hiredis/issues/687), [\#838](https://github.com/redis/hiredis/issues/838), [\#842](https://github.com/redis/hiredis/issues/842) +- RESP3 PUSH messages incorrectly use pending callback [\#825](https://github.com/redis/hiredis/issues/825) +- Asynchronous PSUBSCRIBE command fails when using RESP3 [\#815](https://github.com/redis/hiredis/issues/815) +- New SSL API [\#804](https://github.com/redis/hiredis/issues/804), [\#813](https://github.com/redis/hiredis/issues/813) +- Hard-coded limit of nested reply depth [\#794](https://github.com/redis/hiredis/issues/794) +- Fix TCP_NODELAY in Windows/OSX [\#679](https://github.com/redis/hiredis/issues/679), [\#690](https://github.com/redis/hiredis/issues/690), [\#779](https://github.com/redis/hiredis/issues/779), [\#785](https://github.com/redis/hiredis/issues/785), +- Added timers to libev adapter. [\#778](https://github.com/redis/hiredis/issues/778), [\#795](https://github.com/redis/hiredis/pull/795) +- Initialization discards const qualifier [\#777](https://github.com/redis/hiredis/issues/777) +- \[BUG\]\[MinGW64\] Error setting socket timeout [\#775](https://github.com/redis/hiredis/issues/775) +- undefined reference to hi_malloc [\#769](https://github.com/redis/hiredis/issues/769) +- hiredis pkg-config file incorrectly ignores multiarch libdir spec'n [\#767](https://github.com/redis/hiredis/issues/767) +- Don't use -G to build shared object on Solaris [\#757](https://github.com/redis/hiredis/issues/757) +- error when make USE\_SSL=1 [\#748](https://github.com/redis/hiredis/issues/748) +- Allow to change SSL Mode [\#646](https://github.com/redis/hiredis/issues/646) +- hiredis/adapters/libevent.h memleak [\#618](https://github.com/redis/hiredis/issues/618) +- redisLibuvPoll crash when server closes the connetion [\#545](https://github.com/redis/hiredis/issues/545) +- about redisAsyncDisconnect question [\#518](https://github.com/redis/hiredis/issues/518) +- hiredis adapters libuv error for help [\#508](https://github.com/redis/hiredis/issues/508) +- API/ABI changes analysis [\#506](https://github.com/redis/hiredis/issues/506) +- Memory leak patch in Redis [\#502](https://github.com/redis/hiredis/issues/502) +- Remove the depth limitation [\#421](https://github.com/redis/hiredis/issues/421) + +**Merged pull requests:** + +- Move SSL management to a distinct private pointer [\#855](https://github.com/redis/hiredis/pull/855) ([michael-grunder](https://github.com/michael-grunder)) +- Move include to sockcompat.h to maintain style [\#850](https://github.com/redis/hiredis/pull/850) ([michael-grunder](https://github.com/michael-grunder)) +- Remove erroneous tag and add license to push example [\#849](https://github.com/redis/hiredis/pull/849) ([michael-grunder](https://github.com/michael-grunder)) +- fix windows compiling with mingw 
[\#848](https://github.com/redis/hiredis/pull/848) ([rmalizia44](https://github.com/rmalizia44)) +- Some Windows quality of life improvements. [\#846](https://github.com/redis/hiredis/pull/846) ([michael-grunder](https://github.com/michael-grunder)) +- Use \_WIN32 define instead of WIN32 [\#845](https://github.com/redis/hiredis/pull/845) ([michael-grunder](https://github.com/michael-grunder)) +- Non Linux CI fixes [\#844](https://github.com/redis/hiredis/pull/844) ([michael-grunder](https://github.com/michael-grunder)) +- Resp3 oob push support [\#841](https://github.com/redis/hiredis/pull/841) ([michael-grunder](https://github.com/michael-grunder)) +- fix \#785: defer TCP\_NODELAY in async tcp connections [\#836](https://github.com/redis/hiredis/pull/836) ([OmriSteiner](https://github.com/OmriSteiner)) +- sdsrange overflow fix [\#830](https://github.com/redis/hiredis/pull/830) ([michael-grunder](https://github.com/michael-grunder)) +- Use explicit pointer casting for c++ compatibility [\#826](https://github.com/redis/hiredis/pull/826) ([aureus1](https://github.com/aureus1)) +- Document allocator injection and completeness fix in test.c [\#824](https://github.com/redis/hiredis/pull/824) ([michael-grunder](https://github.com/michael-grunder)) +- Use unique names for allocator struct members [\#823](https://github.com/redis/hiredis/pull/823) ([michael-grunder](https://github.com/michael-grunder)) +- New SSL API to replace redisSecureConnection\(\). [\#821](https://github.com/redis/hiredis/pull/821) ([yossigo](https://github.com/yossigo)) +- Add logic to handle RESP3 push messages [\#819](https://github.com/redis/hiredis/pull/819) ([michael-grunder](https://github.com/michael-grunder)) +- Use standrad isxdigit instead of custom helper function. [\#814](https://github.com/redis/hiredis/pull/814) ([tryfinally](https://github.com/tryfinally)) +- Fix missing SSL build/install options. [\#812](https://github.com/redis/hiredis/pull/812) ([yossigo](https://github.com/yossigo)) +- Add link to ABI tracker [\#808](https://github.com/redis/hiredis/pull/808) ([michael-grunder](https://github.com/michael-grunder)) +- Resp3 verbatim string support [\#805](https://github.com/redis/hiredis/pull/805) ([michael-grunder](https://github.com/michael-grunder)) +- Allow users to replace allocator and handle OOM everywhere. [\#800](https://github.com/redis/hiredis/pull/800) ([michael-grunder](https://github.com/michael-grunder)) +- Remove nested depth limitation. 
[\#797](https://github.com/redis/hiredis/pull/797) ([michael-grunder](https://github.com/michael-grunder)) +- Attempt to fix compilation on Solaris [\#796](https://github.com/redis/hiredis/pull/796) ([michael-grunder](https://github.com/michael-grunder)) +- Support timeouts in libev adapater [\#795](https://github.com/redis/hiredis/pull/795) ([michael-grunder](https://github.com/michael-grunder)) +- Fix pkgconfig when installing to a custom lib dir [\#793](https://github.com/redis/hiredis/pull/793) ([michael-grunder](https://github.com/michael-grunder)) +- Fix USE\_SSL=1 make/cmake on OSX and CMake tests [\#789](https://github.com/redis/hiredis/pull/789) ([michael-grunder](https://github.com/michael-grunder)) +- Use correct libuv call on Windows [\#784](https://github.com/redis/hiredis/pull/784) ([michael-grunder](https://github.com/michael-grunder)) +- Added CMake package config and fixed hiredis\_ssl on Windows [\#783](https://github.com/redis/hiredis/pull/783) ([michael-grunder](https://github.com/michael-grunder)) +- CMake: Set hiredis\_ssl shared object version. [\#780](https://github.com/redis/hiredis/pull/780) ([yossigo](https://github.com/yossigo)) +- Win32 tests and timeout fix [\#776](https://github.com/redis/hiredis/pull/776) ([michael-grunder](https://github.com/michael-grunder)) +- Provides an optional cleanup callback for async data. [\#768](https://github.com/redis/hiredis/pull/768) ([heronr](https://github.com/heronr)) +- Housekeeping fixes [\#764](https://github.com/redis/hiredis/pull/764) ([michael-grunder](https://github.com/michael-grunder)) +- install alloc.h [\#756](https://github.com/redis/hiredis/pull/756) ([ch1aki](https://github.com/ch1aki)) +- fix spelling mistakes [\#746](https://github.com/redis/hiredis/pull/746) ([ShooterIT](https://github.com/ShooterIT)) +- Free the reply in redisGetReply when passed NULL [\#741](https://github.com/redis/hiredis/pull/741) ([michael-grunder](https://github.com/michael-grunder)) +- Fix dead code in sslLogCallback relating to should\_log variable. [\#737](https://github.com/redis/hiredis/pull/737) ([natoscott](https://github.com/natoscott)) +- Fix typo in dict.c. [\#731](https://github.com/redis/hiredis/pull/731) ([Kevin-Xi](https://github.com/Kevin-Xi)) +- Adding an option to DISABLE\_TESTS [\#727](https://github.com/redis/hiredis/pull/727) ([pbotros](https://github.com/pbotros)) +- Update README with SSL support. [\#720](https://github.com/redis/hiredis/pull/720) ([yossigo](https://github.com/yossigo)) +- Fixes leaks in unit tests [\#715](https://github.com/redis/hiredis/pull/715) ([michael-grunder](https://github.com/michael-grunder)) +- SSL Tests [\#711](https://github.com/redis/hiredis/pull/711) ([yossigo](https://github.com/yossigo)) +- SSL Reorganization [\#708](https://github.com/redis/hiredis/pull/708) ([yossigo](https://github.com/yossigo)) +- Fix MSVC build. [\#706](https://github.com/redis/hiredis/pull/706) ([yossigo](https://github.com/yossigo)) +- SSL: Properly report SSL\_connect\(\) errors. [\#702](https://github.com/redis/hiredis/pull/702) ([yossigo](https://github.com/yossigo)) +- Silent SSL trace to stdout by default. [\#699](https://github.com/redis/hiredis/pull/699) ([yossigo](https://github.com/yossigo)) +- Port RESP3 support from Redis. 
[\#697](https://github.com/redis/hiredis/pull/697) ([yossigo](https://github.com/yossigo)) +- Removed whitespace before newline [\#691](https://github.com/redis/hiredis/pull/691) ([Miniwoffer](https://github.com/Miniwoffer)) +- Add install adapters header files [\#688](https://github.com/redis/hiredis/pull/688) ([kevin1018](https://github.com/kevin1018)) +- Remove unnecessary null check before free [\#684](https://github.com/redis/hiredis/pull/684) ([qlyoung](https://github.com/qlyoung)) +- redisReaderGetReply leak memory [\#671](https://github.com/redis/hiredis/pull/671) ([movebean](https://github.com/movebean)) +- fix timeout code in windows [\#670](https://github.com/redis/hiredis/pull/670) ([jman-krafton](https://github.com/jman-krafton)) +- test: fix errstr matching for musl libc [\#665](https://github.com/redis/hiredis/pull/665) ([ghost](https://github.com/ghost)) +- Windows: MinGW fixes and Windows Travis builders [\#663](https://github.com/redis/hiredis/pull/663) ([mbitsnbites](https://github.com/mbitsnbites)) +- The setsockopt and getsockopt API diffs from BSD socket and WSA one [\#662](https://github.com/redis/hiredis/pull/662) ([dragonation](https://github.com/dragonation)) +- Fix Compile Error On Windows \(Visual Studio\) [\#658](https://github.com/redis/hiredis/pull/658) ([jinjiazhang](https://github.com/jinjiazhang)) +- Fix NXDOMAIN test case [\#653](https://github.com/redis/hiredis/pull/653) ([michael-grunder](https://github.com/michael-grunder)) +- Add MinGW support [\#652](https://github.com/redis/hiredis/pull/652) ([mbitsnbites](https://github.com/mbitsnbites)) +- SSL Support [\#645](https://github.com/redis/hiredis/pull/645) ([mnunberg](https://github.com/mnunberg)) +- Fix Invalid argument after redisAsyncConnectUnix [\#644](https://github.com/redis/hiredis/pull/644) ([codehz](https://github.com/codehz)) +- Makefile: use predefined AR [\#632](https://github.com/redis/hiredis/pull/632) ([Mic92](https://github.com/Mic92)) +- FreeBSD build fix [\#628](https://github.com/redis/hiredis/pull/628) ([devnexen](https://github.com/devnexen)) +- Fix errors not propagating properly with libuv.h. 
[\#624](https://github.com/redis/hiredis/pull/624) ([yossigo](https://github.com/yossigo)) +- Update README.md [\#621](https://github.com/redis/hiredis/pull/621) ([Crunsher](https://github.com/Crunsher)) +- Fix redisBufferRead documentation [\#620](https://github.com/redis/hiredis/pull/620) ([hacst](https://github.com/hacst)) +- Add CPPFLAGS to REAL\_CFLAGS [\#614](https://github.com/redis/hiredis/pull/614) ([thomaslee](https://github.com/thomaslee)) +- Update createArray to take size\_t [\#597](https://github.com/redis/hiredis/pull/597) ([justinbrewer](https://github.com/justinbrewer)) +- fix common realloc mistake and add null check more [\#580](https://github.com/redis/hiredis/pull/580) ([charsyam](https://github.com/charsyam)) +- Proper error reporting for connect failures [\#578](https://github.com/redis/hiredis/pull/578) ([mnunberg](https://github.com/mnunberg)) + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* + +## [1.0.0-rc1](https://github.com/redis/hiredis/tree/v1.0.0-rc1) - (2020-07-29) + +_Note: There were no changes to code between v1.0.0-rc1 and v1.0.0 so see v1.0.0 for changelog_ + +### 0.14.1 (2020-03-13) + +* Adds safe allocation wrappers (CVE-2020-7105, #747, #752) (Michael Grunder) + +### 0.14.0 (2018-09-25) +**BREAKING CHANGES**: + +* Change `redisReply.len` to `size_t`, as it denotes the the size of a string + + User code should compare this to `size_t` values as well. + If it was used to compare to other values, casting might be necessary or can be removed, if casting was applied before. + +* Make string2ll static to fix conflict with Redis (Tom Lee [c3188b]) +* Use -dynamiclib instead of -shared for OSX (Ryan Schmidt [a65537]) +* Use string2ll from Redis w/added tests (Michael Grunder [7bef04, 60f622]) +* Makefile - OSX compilation fixes (Ryan Schmidt [881fcb, 0e9af8]) +* Remove redundant NULL checks (Justin Brewer [54acc8, 58e6b8]) +* Fix bulk and multi-bulk length truncation (Justin Brewer [109197]) +* Fix SIGSEGV in OpenBSD by checking for NULL before calling freeaddrinfo (Justin Brewer [546d94]) +* Several POSIX compatibility fixes (Justin Brewer [bbeab8, 49bbaa, d1c1b6]) +* Makefile - Compatibility fixes (Dimitri Vorobiev [3238cf, 12a9d1]) +* Makefile - Fix make install on FreeBSD (Zach Shipko [a2ef2b]) +* Makefile - don't assume $(INSTALL) is cp (Igor Gnatenko [725a96]) +* Separate side-effect causing function from assert and small cleanup (amallia [b46413, 3c3234]) +* Don't send negative values to `__redisAsyncCommand` (Frederik Deweerdt [706129]) +* Fix leak if setsockopt fails (Frederik Deweerdt [e21c9c]) +* Fix libevent leak (zfz [515228]) +* Clean up GCC warning (Ichito Nagata [2ec774]) +* Keep track of errno in `__redisSetErrorFromErrno()` as snprintf may use it (Jin Qing [25cd88]) +* Solaris compilation fix (Donald Whyte [41b07d]) +* Reorder linker arguments when building examples (Tustfarm-heart [06eedd]) +* Keep track of subscriptions in case of rapid subscribe/unsubscribe (Hyungjin Kim [073dc8, be76c5, d46999]) +* libuv use after free fix (Paul Scott [cbb956]) +* Properly close socket fd on reconnect attempt (WSL [64d1ec]) +* Skip valgrind in OSX tests (Jan-Erik Rediger [9deb78]) +* Various updates for Travis testing OSX (Ted Nyman [fa3774, 16a459, bc0ea5]) +* Update libevent (Chris Xin [386802]) +* Change sds.h for building in C++ projects (Ali Volkan ATLI [f5b32e]) +* Use proper format specifier in redisFormatSdsCommandArgv (Paulino Huerta, 
Jan-Erik Rediger [360a06, 8655a6]) +* Better handling of NULL reply in example code (Jan-Erik Rediger [1b8ed3]) +* Prevent overflow when formatting an error (Jan-Erik Rediger [0335cb]) +* Compatibility fix for strerror_r (Tom Lee [bb1747]) +* Properly detect integer parse/overflow errors (Justin Brewer [93421f]) +* Adds CI for Windows and cygwin fixes (owent, [6c53d6, 6c3e40]) +* Catch a buffer overflow when formatting the error message +* Import latest upstream sds. This breaks applications that are linked against the old hiredis v0.13 +* Fix warnings, when compiled with -Wshadow +* Make hiredis compile in Cygwin on Windows, now CI-tested +* Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now + protocol errors. This is consistent with the RESP specification. On 32-bit + platforms, the upper bound is lowered to `SIZE_MAX`. + +* Remove backwards compatibility macro's + +This removes the following old function aliases, use the new name now: + +| Old | New | +| --------------------------- | ---------------------- | +| redisReplyReaderCreate | redisReaderCreate | +| redisReplyReaderCreate | redisReaderCreate | +| redisReplyReaderFree | redisReaderFree | +| redisReplyReaderFeed | redisReaderFeed | +| redisReplyReaderGetReply | redisReaderGetReply | +| redisReplyReaderSetPrivdata | redisReaderSetPrivdata | +| redisReplyReaderGetObject | redisReaderGetObject | +| redisReplyReaderGetError | redisReaderGetError | + +* The `DEBUG` variable in the Makefile was renamed to `DEBUG_FLAGS` + +Previously it broke some builds for people that had `DEBUG` set to some arbitrary value, +due to debugging other software. +By renaming we avoid unintentional name clashes. + +Simply rename `DEBUG` to `DEBUG_FLAGS` in your environment to make it working again. + +### 0.13.3 (2015-09-16) + +* Revert "Clear `REDIS_CONNECTED` flag when connection is closed". +* Make tests pass on FreeBSD (Thanks, Giacomo Olgeni) + + +If the `REDIS_CONNECTED` flag is cleared, +the async onDisconnect callback function will never be called. +This causes problems as the disconnect is never reported back to the user. + +### 0.13.2 (2015-08-25) + +* Prevent crash on pending replies in async code (Thanks, @switch-st) +* Clear `REDIS_CONNECTED` flag when connection is closed (Thanks, Jerry Jacobs) +* Add MacOS X addapter (Thanks, @dizzus) +* Add Qt adapter (Thanks, Pietro Cerutti) +* Add Ivykis adapter (Thanks, Gergely Nagy) + +All adapters are provided as is and are only tested where possible. + +### 0.13.1 (2015-05-03) + +This is a bug fix release. +The new `reconnect` method introduced new struct members, which clashed with pre-defined names in pre-C99 code. +Another commit forced C99 compilation just to make it work, but of course this is not desirable for outside projects. +Other non-C99 code can now use hiredis as usual again. +Sorry for the inconvenience. + +* Fix memory leak in async reply handling (Salvatore Sanfilippo) +* Rename struct member to avoid name clash with pre-c99 code (Alex Balashov, ncopa) + +### 0.13.0 (2015-04-16) + +This release adds a minimal Windows compatibility layer. 
+The parser, standalone since v0.12.0, can now be compiled on Windows +(and thus used in other client libraries as well) + +* Windows compatibility layer for parser code (tzickel) +* Properly escape data printed to PKGCONF file (Dan Skorupski) +* Fix tests when assert() undefined (Keith Bennett, Matt Stancliff) +* Implement a reconnect method for the client context, this changes the structure of `redisContext` (Aaron Bedra) + +### 0.12.1 (2015-01-26) + +* Fix `make install`: DESTDIR support, install all required files, install PKGCONF in proper location +* Fix `make test` as 32 bit build on 64 bit platform + +### 0.12.0 (2015-01-22) + +* Add optional KeepAlive support + +* Try again on EINTR errors + +* Add libuv adapter + +* Add IPv6 support + +* Remove possibility of multiple close on same fd + +* Add ability to bind source address on connect + +* Add redisConnectFd() and redisFreeKeepFd() + +* Fix getaddrinfo() memory leak + +* Free string if it is unused (fixes memory leak) + +* Improve redisAppendCommandArgv performance 2.5x + +* Add support for SO_REUSEADDR + +* Fix redisvFormatCommand format parsing + +* Add GLib 2.0 adapter + +* Refactor reading code into read.c + +* Fix errno error buffers to not clobber errors + +* Generate pkgconf during build + +* Silence _BSD_SOURCE warnings + +* Improve digit counting for multibulk creation + + +### 0.11.0 + +* Increase the maximum multi-bulk reply depth to 7. + +* Increase the read buffer size from 2k to 16k. + +* Use poll(2) instead of select(2) to support large fds (>= 1024). + +### 0.10.1 + +* Makefile overhaul. Important to check out if you override one or more + variables using environment variables or via arguments to the "make" tool. + +* Issue #45: Fix potential memory leak for a multi bulk reply with 0 elements + being created by the default reply object functions. + +* Issue #43: Don't crash in an asynchronous context when Redis returns an error + reply after the connection has been made (this happens when the maximum + number of connections is reached). + +### 0.10.0 + +* See commit log. 
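The v1.0.0 breaking-change note above says that the single `options->timeout` field was split into separate connect and command timeouts. A hedged sketch of what migrated caller code might look like follows; the `redisOptions` fields, the `REDIS_OPTIONS_SET_TCP` macro and `redisConnectWithOptions` are assumed from the hiredis 1.0 API rather than quoted from this patch, and the host, port and timeout values are placeholders.

```c
#include <stdio.h>
#include <sys/time.h>
#include <hiredis/hiredis.h>

int main(void)
{
    /* Placeholder timeouts: 1.5 s to connect, 250 ms per command. */
    struct timeval connect_tv = { .tv_sec = 1, .tv_usec = 500000 };
    struct timeval command_tv = { .tv_sec = 0, .tv_usec = 250000 };

    redisOptions options = {0};
    REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379);
    options.connect_timeout = &connect_tv;  /* was options->timeout before v1.0.0 */
    options.command_timeout = &command_tv;  /* new, distinct per-command timeout */

    redisContext *c = redisConnectWithOptions(&options);
    if (c == NULL || c->err) {
        printf("Connection error: %s\n", c ? c->errstr : "can't allocate context");
        if (c) redisFree(c);
        return 1;
    }

    redisFree(c);
    return 0;
}
```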
diff --git a/deps/hiredis/CMakeLists.txt b/deps/hiredis/CMakeLists.txt new file mode 100644 index 0000000..fe6720b --- /dev/null +++ b/deps/hiredis/CMakeLists.txt @@ -0,0 +1,245 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.4.0) + +OPTION(ENABLE_SSL "Build hiredis_ssl for SSL support" OFF) +OPTION(DISABLE_TESTS "If tests should be compiled or not" OFF) +OPTION(ENABLE_SSL_TESTS "Should we test SSL connections" OFF) +OPTION(ENABLE_ASYNC_TESTS "Should we run all asynchronous API tests" OFF) + +MACRO(getVersionBit name) + SET(VERSION_REGEX "^#define ${name} (.+)$") + FILE(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/hiredis.h" + VERSION_BIT REGEX ${VERSION_REGEX}) + STRING(REGEX REPLACE ${VERSION_REGEX} "\\1" ${name} "${VERSION_BIT}") +ENDMACRO(getVersionBit) + +getVersionBit(HIREDIS_MAJOR) +getVersionBit(HIREDIS_MINOR) +getVersionBit(HIREDIS_PATCH) +getVersionBit(HIREDIS_SONAME) +SET(VERSION "${HIREDIS_MAJOR}.${HIREDIS_MINOR}.${HIREDIS_PATCH}") +MESSAGE("Detected version: ${VERSION}") + +PROJECT(hiredis LANGUAGES "C" VERSION "${VERSION}") +INCLUDE(GNUInstallDirs) + +# Hiredis requires C99 +SET(CMAKE_C_STANDARD 99) +SET(CMAKE_POSITION_INDEPENDENT_CODE ON) +SET(CMAKE_DEBUG_POSTFIX d) + +SET(ENABLE_EXAMPLES OFF CACHE BOOL "Enable building hiredis examples") + +SET(hiredis_sources + alloc.c + async.c + dict.c + hiredis.c + net.c + read.c + sds.c + sockcompat.c) + +SET(hiredis_sources ${hiredis_sources}) + +IF(WIN32) + ADD_COMPILE_DEFINITIONS(_CRT_SECURE_NO_WARNINGS WIN32_LEAN_AND_MEAN) +ENDIF() + +ADD_LIBRARY(hiredis SHARED ${hiredis_sources}) +ADD_LIBRARY(hiredis_static STATIC ${hiredis_sources}) +ADD_LIBRARY(hiredis::hiredis ALIAS hiredis) +ADD_LIBRARY(hiredis::hiredis_static ALIAS hiredis_static) + +SET_TARGET_PROPERTIES(hiredis + PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE + VERSION "${HIREDIS_SONAME}") +SET_TARGET_PROPERTIES(hiredis_static + PROPERTIES COMPILE_PDB_NAME hiredis_static) +SET_TARGET_PROPERTIES(hiredis_static + PROPERTIES COMPILE_PDB_NAME_DEBUG hiredis_static${CMAKE_DEBUG_POSTFIX}) +IF(WIN32 OR MINGW) + TARGET_LINK_LIBRARIES(hiredis PUBLIC ws2_32 crypt32) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC ws2_32 crypt32) +ELSEIF(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + TARGET_LINK_LIBRARIES(hiredis PUBLIC m) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC m) +ELSEIF(CMAKE_SYSTEM_NAME MATCHES "SunOS") + TARGET_LINK_LIBRARIES(hiredis PUBLIC socket) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC socket) +ENDIF() + +TARGET_INCLUDE_DIRECTORIES(hiredis PUBLIC $ $) +TARGET_INCLUDE_DIRECTORIES(hiredis_static PUBLIC $ $) + +CONFIGURE_FILE(hiredis.pc.in hiredis.pc @ONLY) + +set(CPACK_PACKAGE_VENDOR "Redis") +set(CPACK_PACKAGE_DESCRIPTION "\ +Hiredis is a minimalistic C client library for the Redis database. + +It is minimalistic because it just adds minimal support for the protocol, \ +but at the same time it uses a high level printf-alike API in order to make \ +it much higher level than otherwise suggested by its minimal code base and the \ +lack of explicit bindings for every Redis command. + +Apart from supporting sending commands and receiving replies, it comes with a \ +reply parser that is decoupled from the I/O layer. It is a stream parser designed \ +for easy reusability, which can for instance be used in higher level language bindings \ +for efficient reply parsing. + +Hiredis only supports the binary-safe Redis protocol, so you can use it with any Redis \ +version >= 1.2.0. + +The library comes with multiple APIs. 
There is the synchronous API, the asynchronous API \ +and the reply parsing API.") +set(CPACK_PACKAGE_HOMEPAGE_URL "https://github.com/redis/hiredis") +set(CPACK_PACKAGE_CONTACT "michael dot grunder at gmail dot com") +set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) +set(CPACK_RPM_PACKAGE_AUTOREQPROV ON) + +include(CPack) + +INSTALL(TARGETS hiredis hiredis_static + EXPORT hiredis-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + +if (MSVC) + INSTALL(FILES $ + DESTINATION ${CMAKE_INSTALL_BINDIR} + CONFIGURATIONS Debug RelWithDebInfo) + INSTALL(FILES $/$.pdb + DESTINATION ${CMAKE_INSTALL_LIBDIR} + CONFIGURATIONS Debug RelWithDebInfo) +endif() + +# For NuGet packages +INSTALL(FILES hiredis.targets + DESTINATION build/native) + +INSTALL(FILES hiredis.h read.h sds.h async.h alloc.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + +INSTALL(DIRECTORY adapters + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) + +export(EXPORT hiredis-targets + FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis-targets.cmake" + NAMESPACE hiredis::) + +SET(CMAKE_CONF_INSTALL_DIR share/hiredis) +SET(INCLUDE_INSTALL_DIR include) +include(CMakePackageConfigHelpers) +configure_package_config_file(hiredis-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake + INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} + PATH_VARS INCLUDE_INSTALL_DIR) + +INSTALL(EXPORT hiredis-targets + FILE hiredis-targets.cmake + NAMESPACE hiredis:: + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + + +IF(ENABLE_SSL) + IF (NOT OPENSSL_ROOT_DIR) + IF (APPLE) + SET(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") + ENDIF() + ENDIF() + FIND_PACKAGE(OpenSSL REQUIRED) + SET(hiredis_ssl_sources + ssl.c) + ADD_LIBRARY(hiredis_ssl SHARED + ${hiredis_ssl_sources}) + ADD_LIBRARY(hiredis_ssl_static STATIC + ${hiredis_ssl_sources}) + + IF (APPLE) + SET_PROPERTY(TARGET hiredis_ssl PROPERTY LINK_FLAGS "-Wl,-undefined -Wl,dynamic_lookup") + ENDIF() + + SET_TARGET_PROPERTIES(hiredis_ssl + PROPERTIES + WINDOWS_EXPORT_ALL_SYMBOLS TRUE + VERSION "${HIREDIS_SONAME}") + SET_TARGET_PROPERTIES(hiredis_ssl_static + PROPERTIES COMPILE_PDB_NAME hiredis_ssl_static) + SET_TARGET_PROPERTIES(hiredis_ssl_static + PROPERTIES COMPILE_PDB_NAME_DEBUG hiredis_ssl_static${CMAKE_DEBUG_POSTFIX}) + + TARGET_INCLUDE_DIRECTORIES(hiredis_ssl PRIVATE "${OPENSSL_INCLUDE_DIR}") + TARGET_INCLUDE_DIRECTORIES(hiredis_ssl_static PRIVATE "${OPENSSL_INCLUDE_DIR}") + + TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE ${OPENSSL_LIBRARIES}) + IF (WIN32 OR MINGW) + TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE hiredis) + TARGET_LINK_LIBRARIES(hiredis_ssl_static PUBLIC hiredis_static) + ENDIF() + CONFIGURE_FILE(hiredis_ssl.pc.in hiredis_ssl.pc @ONLY) + + INSTALL(TARGETS hiredis_ssl hiredis_ssl_static + EXPORT hiredis_ssl-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + + if (MSVC) + INSTALL(FILES $ + DESTINATION ${CMAKE_INSTALL_BINDIR} + CONFIGURATIONS Debug RelWithDebInfo) + INSTALL(FILES $/$.pdb + DESTINATION ${CMAKE_INSTALL_LIBDIR} + CONFIGURATIONS Debug RelWithDebInfo) + endif() + + INSTALL(FILES hiredis_ssl.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl.pc + 
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) + + export(EXPORT hiredis_ssl-targets + FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-targets.cmake" + NAMESPACE hiredis::) + + SET(CMAKE_CONF_INSTALL_DIR share/hiredis_ssl) + configure_package_config_file(hiredis_ssl-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-config.cmake + INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} + PATH_VARS INCLUDE_INSTALL_DIR) + + INSTALL(EXPORT hiredis_ssl-targets + FILE hiredis_ssl-targets.cmake + NAMESPACE hiredis:: + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-config.cmake + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) +ENDIF() + +IF(NOT DISABLE_TESTS) + ENABLE_TESTING() + ADD_EXECUTABLE(hiredis-test test.c) + TARGET_LINK_LIBRARIES(hiredis-test hiredis) + IF(ENABLE_SSL_TESTS) + ADD_DEFINITIONS(-DHIREDIS_TEST_SSL=1) + TARGET_LINK_LIBRARIES(hiredis-test hiredis_ssl) + ENDIF() + IF(ENABLE_ASYNC_TESTS) + ADD_DEFINITIONS(-DHIREDIS_TEST_ASYNC=1) + TARGET_LINK_LIBRARIES(hiredis-test event) + ENDIF() + ADD_TEST(NAME hiredis-test + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test.sh) +ENDIF() + +# Add examples +IF(ENABLE_EXAMPLES) + ADD_SUBDIRECTORY(examples) +ENDIF(ENABLE_EXAMPLES) diff --git a/deps/hiredis/COPYING b/deps/hiredis/COPYING new file mode 100644 index 0000000..a5fc973 --- /dev/null +++ b/deps/hiredis/COPYING @@ -0,0 +1,29 @@ +Copyright (c) 2009-2011, Salvatore Sanfilippo +Copyright (c) 2010-2011, Pieter Noordhuis + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of Redis nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/deps/hiredis/Makefile b/deps/hiredis/Makefile new file mode 100644 index 0000000..a2ad84c --- /dev/null +++ b/deps/hiredis/Makefile @@ -0,0 +1,338 @@ +# Hiredis Makefile +# Copyright (C) 2010-2011 Salvatore Sanfilippo +# Copyright (C) 2010-2011 Pieter Noordhuis +# This file is released under the BSD license, see the COPYING file + +OBJ=alloc.o net.o hiredis.o sds.o async.o read.o sockcompat.o +EXAMPLES=hiredis-example hiredis-example-libevent hiredis-example-libev hiredis-example-glib hiredis-example-push +TESTS=hiredis-test +LIBNAME=libhiredis +PKGCONFNAME=hiredis.pc + +HIREDIS_MAJOR=$(shell grep HIREDIS_MAJOR hiredis.h | awk '{print $$3}') +HIREDIS_MINOR=$(shell grep HIREDIS_MINOR hiredis.h | awk '{print $$3}') +HIREDIS_PATCH=$(shell grep HIREDIS_PATCH hiredis.h | awk '{print $$3}') +HIREDIS_SONAME=$(shell grep HIREDIS_SONAME hiredis.h | awk '{print $$3}') + +# Installation related variables and target +PREFIX?=/usr/local +INCLUDE_PATH?=include/hiredis +LIBRARY_PATH?=lib +PKGCONF_PATH?=pkgconfig +INSTALL_INCLUDE_PATH= $(DESTDIR)$(PREFIX)/$(INCLUDE_PATH) +INSTALL_LIBRARY_PATH= $(DESTDIR)$(PREFIX)/$(LIBRARY_PATH) +INSTALL_PKGCONF_PATH= $(INSTALL_LIBRARY_PATH)/$(PKGCONF_PATH) + +# redis-server configuration used for testing +REDIS_PORT=56379 +REDIS_SERVER=redis-server +define REDIS_TEST_CONFIG + daemonize yes + pidfile /tmp/hiredis-test-redis.pid + port $(REDIS_PORT) + bind 127.0.0.1 + unixsocket /tmp/hiredis-test-redis.sock +endef +export REDIS_TEST_CONFIG + +# Fallback to gcc when $CC is not in $PATH. +CC:=$(shell sh -c 'type $${CC%% *} >/dev/null 2>/dev/null && echo $(CC) || echo gcc') +CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || echo g++') +OPTIMIZATION?=-O3 +WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers +DEBUG_FLAGS?= -g -ggdb +REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CPPFLAGS) $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) +REAL_LDFLAGS=$(LDFLAGS) + +DYLIBSUFFIX=so +STLIBSUFFIX=a +DYLIB_MINOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME) +DYLIB_MAJOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) +DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) + +DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(DYLIB_MINOR_NAME) +STLIBNAME=$(LIBNAME).$(STLIBSUFFIX) +STLIB_MAKE_CMD=$(AR) rcs + +#################### SSL variables start #################### +SSL_OBJ=ssl.o +SSL_LIBNAME=libhiredis_ssl +SSL_PKGCONFNAME=hiredis_ssl.pc +SSL_INSTALLNAME=install-ssl +SSL_DYLIB_MINOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME) +SSL_DYLIB_MAJOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) +SSL_DYLIBNAME=$(SSL_LIBNAME).$(DYLIBSUFFIX) +SSL_STLIBNAME=$(SSL_LIBNAME).$(STLIBSUFFIX) +SSL_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(SSL_DYLIB_MINOR_NAME) + +USE_SSL?=0 +ifeq ($(USE_SSL),1) + # This is required for test.c only + CFLAGS+=-DHIREDIS_TEST_SSL + EXAMPLES+=hiredis-example-ssl hiredis-example-libevent-ssl + SSL_STLIB=$(SSL_STLIBNAME) + SSL_DYLIB=$(SSL_DYLIBNAME) + SSL_PKGCONF=$(SSL_PKGCONFNAME) + SSL_INSTALL=$(SSL_INSTALLNAME) +else + SSL_STLIB= + SSL_DYLIB= + SSL_PKGCONF= + SSL_INSTALL= +endif +##################### SSL variables end ##################### + + +# Platform-specific overrides +uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') + +# This is required for test.c only +ifeq ($(TEST_ASYNC),1) + export CFLAGS+=-DHIREDIS_TEST_ASYNC +endif + +ifeq ($(USE_SSL),1) + ifeq ($(uname_S),Linux) + ifdef OPENSSL_PREFIX + CFLAGS+=-I$(OPENSSL_PREFIX)/include + SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto + else + 
SSL_LDFLAGS=-lssl -lcrypto + endif + else + OPENSSL_PREFIX?=/usr/local/opt/openssl + CFLAGS+=-I$(OPENSSL_PREFIX)/include + SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto + endif +endif + +ifeq ($(uname_S),FreeBSD) + LDFLAGS+=-lm + IS_GCC=$(shell sh -c '$(CC) --version 2>/dev/null |egrep -i -c "gcc"') + ifeq ($(IS_GCC),1) + REAL_CFLAGS+=-pedantic + endif +else + REAL_CFLAGS+=-pedantic +endif + +ifeq ($(uname_S),SunOS) + IS_SUN_CC=$(shell sh -c '$(CC) -V 2>&1 |egrep -i -c "sun|studio"') + ifeq ($(IS_SUN_CC),1) + SUN_SHARED_FLAG=-G + else + SUN_SHARED_FLAG=-shared + endif + REAL_LDFLAGS+= -ldl -lnsl -lsocket + DYLIB_MAKE_CMD=$(CC) $(SUN_SHARED_FLAG) -o $(DYLIBNAME) -h $(DYLIB_MINOR_NAME) $(LDFLAGS) + SSL_DYLIB_MAKE_CMD=$(CC) $(SUN_SHARED_FLAG) -o $(SSL_DYLIBNAME) -h $(SSL_DYLIB_MINOR_NAME) $(LDFLAGS) $(SSL_LDFLAGS) +endif +ifeq ($(uname_S),Darwin) + DYLIBSUFFIX=dylib + DYLIB_MINOR_NAME=$(LIBNAME).$(HIREDIS_SONAME).$(DYLIBSUFFIX) + DYLIB_MAKE_CMD=$(CC) -dynamiclib -Wl,-install_name,$(PREFIX)/$(LIBRARY_PATH)/$(DYLIB_MINOR_NAME) -o $(DYLIBNAME) $(LDFLAGS) + SSL_DYLIB_MAKE_CMD=$(CC) -dynamiclib -Wl,-install_name,$(PREFIX)/$(LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) -o $(SSL_DYLIBNAME) $(LDFLAGS) $(SSL_LDFLAGS) + DYLIB_PLUGIN=-Wl,-undefined -Wl,dynamic_lookup +endif + +all: dynamic static hiredis-test pkgconfig + +dynamic: $(DYLIBNAME) $(SSL_DYLIB) + +static: $(STLIBNAME) $(SSL_STLIB) + +pkgconfig: $(PKGCONFNAME) $(SSL_PKGCONF) + +# Deps (use make dep to generate this) +alloc.o: alloc.c fmacros.h alloc.h +async.o: async.c fmacros.h alloc.h async.h hiredis.h read.h sds.h net.h dict.c dict.h win32.h async_private.h +dict.o: dict.c fmacros.h alloc.h dict.h +hiredis.o: hiredis.c fmacros.h hiredis.h read.h sds.h alloc.h net.h async.h win32.h +net.o: net.c fmacros.h net.h hiredis.h read.h sds.h alloc.h sockcompat.h win32.h +read.o: read.c fmacros.h alloc.h read.h sds.h win32.h +sds.o: sds.c sds.h sdsalloc.h alloc.h +sockcompat.o: sockcompat.c sockcompat.h +test.o: test.c fmacros.h hiredis.h read.h sds.h alloc.h net.h sockcompat.h win32.h + +$(DYLIBNAME): $(OBJ) + $(DYLIB_MAKE_CMD) -o $(DYLIBNAME) $(OBJ) $(REAL_LDFLAGS) + +$(STLIBNAME): $(OBJ) + $(STLIB_MAKE_CMD) $(STLIBNAME) $(OBJ) + +#################### SSL building rules start #################### +$(SSL_DYLIBNAME): $(SSL_OBJ) + $(SSL_DYLIB_MAKE_CMD) $(DYLIB_PLUGIN) -o $(SSL_DYLIBNAME) $(SSL_OBJ) $(REAL_LDFLAGS) $(LDFLAGS) $(SSL_LDFLAGS) + +$(SSL_STLIBNAME): $(SSL_OBJ) + $(STLIB_MAKE_CMD) $(SSL_STLIBNAME) $(SSL_OBJ) + +$(SSL_OBJ): ssl.c hiredis.h read.h sds.h alloc.h async.h win32.h async_private.h +#################### SSL building rules end #################### + +# Binaries: +hiredis-example-libevent: examples/example-libevent.c adapters/libevent.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -levent $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-libevent-ssl: examples/example-libevent-ssl.c adapters/libevent.h $(STLIBNAME) $(SSL_STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -levent $(STLIBNAME) $(SSL_STLIBNAME) $(REAL_LDFLAGS) $(SSL_LDFLAGS) + +hiredis-example-libev: examples/example-libev.c adapters/libev.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -lev $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-glib: examples/example-glib.c adapters/glib.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(shell pkg-config --cflags --libs glib-2.0) $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-ivykis: examples/example-ivykis.c adapters/ivykis.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. 
$< -livykis $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-macosx: examples/example-macosx.c adapters/macosx.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -framework CoreFoundation $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-ssl: examples/example-ssl.c $(STLIBNAME) $(SSL_STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(SSL_STLIBNAME) $(REAL_LDFLAGS) $(SSL_LDFLAGS) + +ifndef AE_DIR +hiredis-example-ae: + @echo "Please specify AE_DIR (e.g. /src)" + @false +else +hiredis-example-ae: examples/example-ae.c adapters/ae.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. -I$(AE_DIR) $< $(AE_DIR)/ae.o $(AE_DIR)/zmalloc.o $(AE_DIR)/../deps/jemalloc/lib/libjemalloc.a -pthread $(STLIBNAME) +endif + +ifndef LIBUV_DIR +# dynamic link libuv.so +hiredis-example-libuv: examples/example-libuv.c adapters/libuv.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. -I$(LIBUV_DIR)/include $< -luv -lpthread -lrt $(STLIBNAME) $(REAL_LDFLAGS) +else +# use user provided static lib +hiredis-example-libuv: examples/example-libuv.c adapters/libuv.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. -I$(LIBUV_DIR)/include $< $(LIBUV_DIR)/.libs/libuv.a -lpthread -lrt $(STLIBNAME) $(REAL_LDFLAGS) +endif + +ifeq ($(and $(QT_MOC),$(QT_INCLUDE_DIR),$(QT_LIBRARY_DIR)),) +hiredis-example-qt: + @echo "Please specify QT_MOC, QT_INCLUDE_DIR AND QT_LIBRARY_DIR" + @false +else +hiredis-example-qt: examples/example-qt.cpp adapters/qt.h $(STLIBNAME) + $(QT_MOC) adapters/qt.h -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore | \ + $(CXX) -x c++ -o qt-adapter-moc.o -c - $(REAL_CFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore + $(QT_MOC) examples/example-qt.h -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore | \ + $(CXX) -x c++ -o qt-example-moc.o -c - $(REAL_CFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore + $(CXX) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore -L$(QT_LIBRARY_DIR) qt-adapter-moc.o qt-example-moc.o $< -pthread $(STLIBNAME) -lQtCore +endif + +hiredis-example: examples/example.c $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-push: examples/example-push.c $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(REAL_LDFLAGS) + +examples: $(EXAMPLES) + +TEST_LIBS = $(STLIBNAME) $(SSL_STLIB) +TEST_LDFLAGS = $(SSL_LDFLAGS) +ifeq ($(USE_SSL),1) + TEST_LDFLAGS += -pthread +endif +ifeq ($(TEST_ASYNC),1) + TEST_LDFLAGS += -levent +endif + +hiredis-test: test.o $(TEST_LIBS) + $(CC) -o $@ $(REAL_CFLAGS) -I. $^ $(REAL_LDFLAGS) $(TEST_LDFLAGS) + +hiredis-%: %.o $(STLIBNAME) + $(CC) $(REAL_CFLAGS) -o $@ $< $(TEST_LIBS) $(REAL_LDFLAGS) + +test: hiredis-test + ./hiredis-test + +check: hiredis-test + TEST_SSL=$(USE_SSL) ./test.sh + +.c.o: + $(CC) -std=c99 -c $(REAL_CFLAGS) $< + +clean: + rm -rf $(DYLIBNAME) $(STLIBNAME) $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(TESTS) $(PKGCONFNAME) examples/hiredis-example* *.o *.gcda *.gcno *.gcov + +dep: + $(CC) $(CPPFLAGS) $(CFLAGS) -MM *.c + +INSTALL?= cp -pPR + +$(PKGCONFNAME): hiredis.h + @echo "Generating $@ for pkgconfig..." + @echo prefix=$(PREFIX) > $@ + @echo exec_prefix=\$${prefix} >> $@ + @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ + @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ + @echo >> $@ + @echo Name: hiredis >> $@ + @echo Description: Minimalistic C client library for Redis. 
>> $@ + @echo Version: $(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(HIREDIS_PATCH) >> $@ + @echo Libs: -L\$${libdir} -lhiredis >> $@ + @echo Cflags: -I\$${includedir} -D_FILE_OFFSET_BITS=64 >> $@ + +$(SSL_PKGCONFNAME): hiredis_ssl.h + @echo "Generating $@ for pkgconfig..." + @echo prefix=$(PREFIX) > $@ + @echo exec_prefix=\$${prefix} >> $@ + @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ + @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ + @echo >> $@ + @echo Name: hiredis_ssl >> $@ + @echo Description: SSL Support for hiredis. >> $@ + @echo Version: $(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(HIREDIS_PATCH) >> $@ + @echo Requires: hiredis >> $@ + @echo Libs: -L\$${libdir} -lhiredis_ssl >> $@ + @echo Libs.private: -lssl -lcrypto >> $@ + +install: $(DYLIBNAME) $(STLIBNAME) $(PKGCONFNAME) $(SSL_INSTALL) + mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_INCLUDE_PATH)/adapters $(INSTALL_LIBRARY_PATH) + $(INSTALL) hiredis.h async.h read.h sds.h alloc.h $(INSTALL_INCLUDE_PATH) + $(INSTALL) adapters/*.h $(INSTALL_INCLUDE_PATH)/adapters + $(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) + $(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH) + mkdir -p $(INSTALL_PKGCONF_PATH) + $(INSTALL) $(PKGCONFNAME) $(INSTALL_PKGCONF_PATH) + +install-ssl: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME) + mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) + $(INSTALL) hiredis_ssl.h $(INSTALL_INCLUDE_PATH) + $(INSTALL) $(SSL_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) + $(INSTALL) $(SSL_STLIBNAME) $(INSTALL_LIBRARY_PATH) + mkdir -p $(INSTALL_PKGCONF_PATH) + $(INSTALL) $(SSL_PKGCONFNAME) $(INSTALL_PKGCONF_PATH) + +32bit: + @echo "" + @echo "WARNING: if this fails under Linux you probably need to install libc6-dev-i386" + @echo "" + $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" + +32bit-vars: + $(eval CFLAGS=-m32) + $(eval LDFLAGS=-m32) + +gprof: + $(MAKE) CFLAGS="-pg" LDFLAGS="-pg" + +gcov: + $(MAKE) CFLAGS+="-fprofile-arcs -ftest-coverage" LDFLAGS="-fprofile-arcs" + +coverage: gcov + make check + mkdir -p tmp/lcov + lcov -d . -c --exclude '/usr*' -o tmp/lcov/hiredis.info + genhtml --legend -o tmp/lcov/report tmp/lcov/hiredis.info + +noopt: + $(MAKE) OPTIMIZATION="" + +.PHONY: all test check clean dep install 32bit 32bit-vars gprof gcov noopt diff --git a/deps/hiredis/README.md b/deps/hiredis/README.md new file mode 100644 index 0000000..ed66220 --- /dev/null +++ b/deps/hiredis/README.md @@ -0,0 +1,671 @@ + +[![Build Status](https://github.com/redis/hiredis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/hiredis/actions/workflows/build.yml) + +**This Readme reflects the latest changed in the master branch. See [v1.0.0](https://github.com/redis/hiredis/tree/v1.0.0) for the Readme and documentation for the latest release ([API/ABI history](https://abi-laboratory.pro/?view=timeline&l=hiredis)).** + +# HIREDIS + +Hiredis is a minimalistic C client library for the [Redis](https://redis.io/) database. + +It is minimalistic because it just adds minimal support for the protocol, but +at the same time it uses a high level printf-alike API in order to make it +much higher level than otherwise suggested by its minimal code base and the +lack of explicit bindings for every Redis command. + +Apart from supporting sending commands and receiving replies, it comes with +a reply parser that is decoupled from the I/O layer. 
It +is a stream parser designed for easy reusability, which can for instance be used +in higher level language bindings for efficient reply parsing. + +Hiredis only supports the binary-safe Redis protocol, so you can use it with any +Redis version >= 1.2.0. + +The library comes with multiple APIs. There is the +*synchronous API*, the *asynchronous API* and the *reply parsing API*. + +## Upgrading to `1.0.2` + +NOTE: v1.0.1 erroneously bumped SONAME, which is why it is skipped here. + +Version 1.0.2 is simply 1.0.0 with a fix for [CVE-2021-32765](https://github.com/redis/hiredis/security/advisories/GHSA-hfm9-39pp-55p2). They are otherwise identical. + +## Upgrading to `1.0.0` + +Version 1.0.0 marks the first stable release of Hiredis. +It includes some minor breaking changes, mostly to make the exposed API more uniform and self-explanatory. +It also bundles the updated `sds` library, to sync up with upstream and Redis. +For code changes see the [Changelog](CHANGELOG.md). + +_Note: As described below, a few member names have been changed but most applications should be able to upgrade with minor code changes and recompiling._ + +## IMPORTANT: Breaking changes from `0.14.1` -> `1.0.0` + +* `redisContext` has two additional members (`free_privdata`, and `privctx`). +* `redisOptions.timeout` has been renamed to `redisOptions.connect_timeout`, and we've added `redisOptions.command_timeout`. +* `redisReplyObjectFunctions.createArray` now takes `size_t` instead of `int` for its length parameter. + +## IMPORTANT: Breaking changes when upgrading from 0.13.x -> 0.14.x + +Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now +protocol errors. This is consistent with the RESP specification. On 32-bit +platforms, the upper bound is lowered to `SIZE_MAX`. + +Change `redisReply.len` to `size_t`, as it denotes the the size of a string + +User code should compare this to `size_t` values as well. If it was used to +compare to other values, casting might be necessary or can be removed, if +casting was applied before. + +## Upgrading from `<0.9.0` + +Version 0.9.0 is a major overhaul of hiredis in every aspect. However, upgrading existing +code using hiredis should not be a big pain. The key thing to keep in mind when +upgrading is that hiredis >= 0.9.0 uses a `redisContext*` to keep state, in contrast to +the stateless 0.0.1 that only has a file descriptor to work with. + +## Synchronous API + +To consume the synchronous API, there are only a few function calls that need to be introduced: + +```c +redisContext *redisConnect(const char *ip, int port); +void *redisCommand(redisContext *c, const char *format, ...); +void freeReplyObject(void *reply); +``` + +### Connecting + +The function `redisConnect` is used to create a so-called `redisContext`. The +context is where Hiredis holds state for a connection. The `redisContext` +struct has an integer `err` field that is non-zero when the connection is in +an error state. The field `errstr` will contain a string with a description of +the error. More information on errors can be found in the **Errors** section. 
+After trying to connect to Redis using `redisConnect` you should +check the `err` field to see if establishing the connection was successful: +```c +redisContext *c = redisConnect("127.0.0.1", 6379); +if (c == NULL || c->err) { + if (c) { + printf("Error: %s\n", c->errstr); + // handle error + } else { + printf("Can't allocate redis context\n"); + } +} +``` + +*Note: A `redisContext` is not thread-safe.* + +### Sending commands + +There are several ways to issue commands to Redis. The first that will be introduced is +`redisCommand`. This function takes a format similar to printf. In the simplest form, +it is used like this: +```c +reply = redisCommand(context, "SET foo bar"); +``` + +The specifier `%s` interpolates a string in the command, and uses `strlen` to +determine the length of the string: +```c +reply = redisCommand(context, "SET foo %s", value); +``` +When you need to pass binary safe strings in a command, the `%b` specifier can be +used. Together with a pointer to the string, it requires a `size_t` length argument +of the string: +```c +reply = redisCommand(context, "SET foo %b", value, (size_t) valuelen); +``` +Internally, Hiredis splits the command in different arguments and will +convert it to the protocol used to communicate with Redis. +One or more spaces separates arguments, so you can use the specifiers +anywhere in an argument: +```c +reply = redisCommand(context, "SET key:%s %s", myid, value); +``` + +### Using replies + +The return value of `redisCommand` holds a reply when the command was +successfully executed. When an error occurs, the return value is `NULL` and +the `err` field in the context will be set (see section on **Errors**). +Once an error is returned the context cannot be reused and you should set up +a new connection. + +The standard replies that `redisCommand` are of the type `redisReply`. The +`type` field in the `redisReply` should be used to test what kind of reply +was received: + +### RESP2 + +* **`REDIS_REPLY_STATUS`**: + * The command replied with a status reply. The status string can be accessed using `reply->str`. + The length of this string can be accessed using `reply->len`. + +* **`REDIS_REPLY_ERROR`**: + * The command replied with an error. The error string can be accessed identical to `REDIS_REPLY_STATUS`. + +* **`REDIS_REPLY_INTEGER`**: + * The command replied with an integer. The integer value can be accessed using the + `reply->integer` field of type `long long`. + +* **`REDIS_REPLY_NIL`**: + * The command replied with a **nil** object. There is no data to access. + +* **`REDIS_REPLY_STRING`**: + * A bulk (string) reply. The value of the reply can be accessed using `reply->str`. + The length of this string can be accessed using `reply->len`. + +* **`REDIS_REPLY_ARRAY`**: + * A multi bulk reply. The number of elements in the multi bulk reply is stored in + `reply->elements`. Every element in the multi bulk reply is a `redisReply` object as well + and can be accessed via `reply->element[..index..]`. + Redis may reply with nested arrays but this is fully supported. + +### RESP3 + +Hiredis also supports every new `RESP3` data type which are as follows. For more information about the protocol see the `RESP3` [specification.](https://github.com/antirez/RESP3/blob/master/spec.md) + +* **`REDIS_REPLY_DOUBLE`**: + * The command replied with a double-precision floating point number. + The value is stored as a string in the `str` member, and can be converted with `strtod` or similar. + +* **`REDIS_REPLY_BOOL`**: + * A boolean true/false reply. 
+ The value is stored in the `integer` member and will be either `0` or `1`. + +* **`REDIS_REPLY_MAP`**: + * An array with the added invariant that there will always be an even number of elements. + The MAP is functionally equivalent to `REDIS_REPLY_ARRAY` except for the previously mentioned invariant. + +* **`REDIS_REPLY_SET`**: + * An array response where each entry is unique. + Like the MAP type, the data is identical to an array response except there are no duplicate values. + +* **`REDIS_REPLY_PUSH`**: + * An array that can be generated spontaneously by Redis. + This array response will always contain at least two subelements. The first contains the type of `PUSH` message (e.g. `message`, or `invalidate`), and the second being a sub-array with the `PUSH` payload itself. + +* **`REDIS_REPLY_ATTR`**: + * An array structurally identical to a `MAP` but intended as meta-data about a reply. + _As of Redis 6.0.6 this reply type is not used in Redis_ + +* **`REDIS_REPLY_BIGNUM`**: + * A string representing an arbitrarily large signed or unsigned integer value. + The number will be encoded as a string in the `str` member of `redisReply`. + +* **`REDIS_REPLY_VERB`**: + * A verbatim string, intended to be presented to the user without modification. + The string payload is stored in the `str` member, and type data is stored in the `vtype` member (e.g. `txt` for raw text or `md` for markdown). + +Replies should be freed using the `freeReplyObject()` function. +Note that this function will take care of freeing sub-reply objects +contained in arrays and nested arrays, so there is no need for the user to +free the sub replies (it is actually harmful and will corrupt the memory). + +**Important:** the current version of hiredis (1.0.0) frees replies when the +asynchronous API is used. This means you should not call `freeReplyObject` when +you use this API. The reply is cleaned up by hiredis _after_ the callback +returns. We may introduce a flag to make this configurable in future versions of the library. + +### Cleaning up + +To disconnect and free the context the following function can be used: +```c +void redisFree(redisContext *c); +``` +This function immediately closes the socket and then frees the allocations done in +creating the context. + +### Sending commands (cont'd) + +Together with `redisCommand`, the function `redisCommandArgv` can be used to issue commands. +It has the following prototype: +```c +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); +``` +It takes the number of arguments `argc`, an array of strings `argv` and the lengths of the +arguments `argvlen`. For convenience, `argvlen` may be set to `NULL` and the function will +use `strlen(3)` on every argument to determine its length. Obviously, when any of the arguments +need to be binary safe, the entire array of lengths `argvlen` should be provided. + +The return value has the same semantic as `redisCommand`. + +### Pipelining + +To explain how Hiredis supports pipelining in a blocking connection, there needs to be +understanding of the internal execution flow. + +When any of the functions in the `redisCommand` family is called, Hiredis first formats the +command according to the Redis protocol. The formatted command is then put in the output buffer +of the context. This output buffer is dynamic, so it can hold any number of commands. +After the command is put in the output buffer, `redisGetReply` is called. This function has the +following two execution paths: + +1. 
The input buffer is non-empty: + * Try to parse a single reply from the input buffer and return it + * If no reply could be parsed, continue at *2* +2. The input buffer is empty: + * Write the **entire** output buffer to the socket + * Read from the socket until a single reply could be parsed + +The function `redisGetReply` is exported as part of the Hiredis API and can be used when a reply +is expected on the socket. To pipeline commands, the only things that needs to be done is +filling up the output buffer. For this cause, two commands can be used that are identical +to the `redisCommand` family, apart from not returning a reply: +```c +void redisAppendCommand(redisContext *c, const char *format, ...); +void redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); +``` +After calling either function one or more times, `redisGetReply` can be used to receive the +subsequent replies. The return value for this function is either `REDIS_OK` or `REDIS_ERR`, where +the latter means an error occurred while reading a reply. Just as with the other commands, +the `err` field in the context can be used to find out what the cause of this error is. + +The following examples shows a simple pipeline (resulting in only a single call to `write(2)` and +a single call to `read(2)`): +```c +redisReply *reply; +redisAppendCommand(context,"SET foo bar"); +redisAppendCommand(context,"GET foo"); +redisGetReply(context,(void**)&reply); // reply for SET +freeReplyObject(reply); +redisGetReply(context,(void**)&reply); // reply for GET +freeReplyObject(reply); +``` +This API can also be used to implement a blocking subscriber: +```c +reply = redisCommand(context,"SUBSCRIBE foo"); +freeReplyObject(reply); +while(redisGetReply(context,(void *)&reply) == REDIS_OK) { + // consume message + freeReplyObject(reply); +} +``` +### Errors + +When a function call is not successful, depending on the function either `NULL` or `REDIS_ERR` is +returned. The `err` field inside the context will be non-zero and set to one of the +following constants: + +* **`REDIS_ERR_IO`**: + There was an I/O error while creating the connection, trying to write + to the socket or read from the socket. If you included `errno.h` in your + application, you can use the global `errno` variable to find out what is + wrong. + +* **`REDIS_ERR_EOF`**: + The server closed the connection which resulted in an empty read. + +* **`REDIS_ERR_PROTOCOL`**: + There was an error while parsing the protocol. + +* **`REDIS_ERR_OTHER`**: + Any other error. Currently, it is only used when a specified hostname to connect + to cannot be resolved. + +In every case, the `errstr` field in the context will be set to hold a string representation +of the error. + +## Asynchronous API + +Hiredis comes with an asynchronous API that works easily with any event library. +Examples are bundled that show using Hiredis with [libev](http://software.schmorp.de/pkg/libev.html) +and [libevent](http://monkey.org/~provos/libevent/). + +### Connecting + +The function `redisAsyncConnect` can be used to establish a non-blocking connection to +Redis. It returns a pointer to the newly created `redisAsyncContext` struct. The `err` field +should be checked after creation to see if there were errors creating the connection. +Because the connection that will be created is non-blocking, the kernel is not able to +instantly return if the specified host and port is able to accept a connection. 
+ +*Note: A `redisAsyncContext` is not thread-safe.* + +```c +redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); +if (c->err) { + printf("Error: %s\n", c->errstr); + // handle error +} +``` + +The asynchronous context can hold a disconnect callback function that is called when the +connection is disconnected (either because of an error or per user request). This function should +have the following prototype: +```c +void(const redisAsyncContext *c, int status); +``` +On a disconnect, the `status` argument is set to `REDIS_OK` when disconnection was initiated by the +user, or `REDIS_ERR` when the disconnection was caused by an error. When it is `REDIS_ERR`, the `err` +field in the context can be accessed to find out the cause of the error. + +The context object is always freed after the disconnect callback fired. When a reconnect is needed, +the disconnect callback is a good point to do so. + +Setting the disconnect callback can only be done once per context. For subsequent calls it will +return `REDIS_ERR`. The function to set the disconnect callback has the following prototype: +```c +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); +``` +`ac->data` may be used to pass user data to this callback, the same can be done for redisConnectCallback. +### Sending commands and their callbacks + +In an asynchronous context, commands are automatically pipelined due to the nature of an event loop. +Therefore, unlike the synchronous API, there is only a single way to send commands. +Because commands are sent to Redis asynchronously, issuing a command requires a callback function +that is called when the reply is received. Reply callbacks should have the following prototype: +```c +void(redisAsyncContext *c, void *reply, void *privdata); +``` +The `privdata` argument can be used to curry arbitrary data to the callback from the point where +the command is initially queued for execution. + +The functions that can be used to issue commands in an asynchronous context are: +```c +int redisAsyncCommand( + redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, + const char *format, ...); +int redisAsyncCommandArgv( + redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, + int argc, const char **argv, const size_t *argvlen); +``` +Both functions work like their blocking counterparts. The return value is `REDIS_OK` when the command +was successfully added to the output buffer and `REDIS_ERR` otherwise. Example: when the connection +is being disconnected per user-request, no new commands may be added to the output buffer and `REDIS_ERR` is +returned on calls to the `redisAsyncCommand` family. + +If the reply for a command with a `NULL` callback is read, it is immediately freed. When the callback +for a command is non-`NULL`, the memory is freed immediately following the callback: the reply is only +valid for the duration of the callback. + +All pending callbacks are called with a `NULL` reply when the context encountered an error. + +### Disconnecting + +An asynchronous connection can be terminated using: +```c +void redisAsyncDisconnect(redisAsyncContext *ac); +``` +When this function is called, the connection is **not** immediately terminated. Instead, new +commands are no longer accepted and the connection is only terminated when all pending commands +have been written to the socket, their respective replies have been read and their respective +callbacks have been executed. 
After this, the disconnection callback is executed with the +`REDIS_OK` status and the context object is freed. + +### Hooking it up to event library *X* + +There are a few hooks that need to be set on the context object after it is created. +See the `adapters/` directory for bindings to *libev* and *libevent*. + +## Reply parsing API + +Hiredis comes with a reply parsing API that makes it easy for writing higher +level language bindings. + +The reply parsing API consists of the following functions: +```c +redisReader *redisReaderCreate(void); +void redisReaderFree(redisReader *reader); +int redisReaderFeed(redisReader *reader, const char *buf, size_t len); +int redisReaderGetReply(redisReader *reader, void **reply); +``` +The same set of functions are used internally by hiredis when creating a +normal Redis context, the above API just exposes it to the user for a direct +usage. + +### Usage + +The function `redisReaderCreate` creates a `redisReader` structure that holds a +buffer with unparsed data and state for the protocol parser. + +Incoming data -- most likely from a socket -- can be placed in the internal +buffer of the `redisReader` using `redisReaderFeed`. This function will make a +copy of the buffer pointed to by `buf` for `len` bytes. This data is parsed +when `redisReaderGetReply` is called. This function returns an integer status +and a reply object (as described above) via `void **reply`. The returned status +can be either `REDIS_OK` or `REDIS_ERR`, where the latter means something went +wrong (either a protocol error, or an out of memory error). + +The parser limits the level of nesting for multi bulk payloads to 7. If the +multi bulk nesting level is higher than this, the parser returns an error. + +### Customizing replies + +The function `redisReaderGetReply` creates `redisReply` and makes the function +argument `reply` point to the created `redisReply` variable. For instance, if +the response of type `REDIS_REPLY_STATUS` then the `str` field of `redisReply` +will hold the status as a vanilla C string. However, the functions that are +responsible for creating instances of the `redisReply` can be customized by +setting the `fn` field on the `redisReader` struct. This should be done +immediately after creating the `redisReader`. + +For example, [hiredis-rb](https://github.com/pietern/hiredis-rb/blob/master/ext/hiredis_ext/reader.c) +uses customized reply object functions to create Ruby objects. + +### Reader max buffer + +Both when using the Reader API directly or when using it indirectly via a +normal Redis context, the redisReader structure uses a buffer in order to +accumulate data from the server. +Usually this buffer is destroyed when it is empty and is larger than 16 +KiB in order to avoid wasting memory in unused buffers + +However when working with very big payloads destroying the buffer may slow +down performances considerably, so it is possible to modify the max size of +an idle buffer changing the value of the `maxbuf` field of the reader structure +to the desired value. The special value of 0 means that there is no maximum +value for an idle buffer, so the buffer will never get freed. + +For instance if you have a normal Redis context you can set the maximum idle +buffer to zero (unlimited) just with: +```c +context->reader->maxbuf = 0; +``` +This should be done only in order to maximize performances when working with +large payloads. 
The context should be set back to `REDIS_READER_MAX_BUF` again +as soon as possible in order to prevent allocation of useless memory. + +### Reader max array elements + +By default the hiredis reply parser sets the maximum number of multi-bulk elements +to 2^32 - 1 or 4,294,967,295 entries. If you need to process multi-bulk replies +with more than this many elements you can set the value higher or to zero, meaning +unlimited with: +```c +context->reader->maxelements = 0; +``` + +## SSL/TLS Support + +### Building + +SSL/TLS support is not built by default and requires an explicit flag: + + make USE_SSL=1 + +This requires OpenSSL development package (e.g. including header files to be +available. + +When enabled, SSL/TLS support is built into extra `libhiredis_ssl.a` and +`libhiredis_ssl.so` static/dynamic libraries. This leaves the original libraries +unaffected so no additional dependencies are introduced. + +### Using it + +First, you'll need to make sure you include the SSL header file: + +```c +#include "hiredis.h" +#include "hiredis_ssl.h" +``` + +You will also need to link against `libhiredis_ssl`, **in addition** to +`libhiredis` and add `-lssl -lcrypto` to satisfy its dependencies. + +Hiredis implements SSL/TLS on top of its normal `redisContext` or +`redisAsyncContext`, so you will need to establish a connection first and then +initiate an SSL/TLS handshake. + +#### Hiredis OpenSSL Wrappers + +Before Hiredis can negotiate an SSL/TLS connection, it is necessary to +initialize OpenSSL and create a context. You can do that in two ways: + +1. Work directly with the OpenSSL API to initialize the library's global context + and create `SSL_CTX *` and `SSL *` contexts. With an `SSL *` object you can + call `redisInitiateSSL()`. +2. Work with a set of Hiredis-provided wrappers around OpenSSL, create a + `redisSSLContext` object to hold configuration and use + `redisInitiateSSLWithContext()` to initiate the SSL/TLS handshake. + +```c +/* An Hiredis SSL context. It holds SSL configuration and can be reused across + * many contexts. + */ +redisSSLContext *ssl_context; + +/* An error variable to indicate what went wrong, if the context fails to + * initialize. + */ +redisSSLContextError ssl_error; + +/* Initialize global OpenSSL state. + * + * You should call this only once when your app initializes, and only if + * you don't explicitly or implicitly initialize OpenSSL it elsewhere. + */ +redisInitOpenSSL(); + +/* Create SSL context */ +ssl_context = redisCreateSSLContext( + "cacertbundle.crt", /* File name of trusted CA/ca bundle file, optional */ + "/path/to/certs", /* Path of trusted certificates, optional */ + "client_cert.pem", /* File name of client certificate file, optional */ + "client_key.pem", /* File name of client private key, optional */ + "redis.mydomain.com", /* Server name to request (SNI), optional */ + &ssl_error); + +if(ssl_context == NULL || ssl_error != 0) { + /* Handle error and abort... */ + /* e.g. + printf("SSL error: %s\n", + (ssl_error != 0) ? + redisSSLContextGetError(ssl_error) : "Unknown error"); + // Abort + */ +} + +/* Create Redis context and establish connection */ +c = redisConnect("localhost", 6443); +if (c == NULL || c->err) { + /* Handle error and abort... */ +} + +/* Negotiate SSL/TLS */ +if (redisInitiateSSLWithContext(c, ssl_context) != REDIS_OK) { + /* Handle error, in c->err / c->errstr */ +} +``` + +## RESP3 PUSH replies +Redis 6.0 introduced PUSH replies with the reply-type `>`. 
These messages are generated spontaneously and can arrive at any time, so must be handled using callbacks. + +### Default behavior +Hiredis installs handlers on `redisContext` and `redisAsyncContext` by default, which will intercept and free any PUSH replies detected. This means existing code will work as-is after upgrading to Redis 6 and switching to `RESP3`. + +### Custom PUSH handler prototypes +The callback prototypes differ between `redisContext` and `redisAsyncContext`. + +#### redisContext +```c +void my_push_handler(void *privdata, void *reply) { + /* Handle the reply */ + + /* Note: We need to free the reply in our custom handler for + blocking contexts. This lets us keep the reply if + we want. */ + freeReplyObject(reply); +} +``` + +#### redisAsyncContext +```c +void my_async_push_handler(redisAsyncContext *ac, void *reply) { + /* Handle the reply */ + + /* Note: Because async hiredis always frees replies, you should + not call freeReplyObject in an async push callback. */ +} +``` + +### Installing a custom handler +There are two ways to set your own PUSH handlers. + +1. Set `push_cb` or `async_push_cb` in the `redisOptions` struct and connect with `redisConnectWithOptions` or `redisAsyncConnectWithOptions`. + ```c + redisOptions = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + options->push_cb = my_push_handler; + redisContext *context = redisConnectWithOptions(&options); + ``` +2. Call `redisSetPushCallback` or `redisAsyncSetPushCallback` on a connected context. + ```c + redisContext *context = redisConnect("127.0.0.1", 6379); + redisSetPushCallback(context, my_push_handler); + ``` + + _Note `redisSetPushCallback` and `redisAsyncSetPushCallback` both return any currently configured handler, making it easy to override and then return to the old value._ + +### Specifying no handler +If you have a unique use-case where you don't want hiredis to automatically intercept and free PUSH replies, you will want to configure no handler at all. This can be done in two ways. +1. Set the `REDIS_OPT_NO_PUSH_AUTOFREE` flag in `redisOptions` and leave the callback function pointer `NULL`. + ```c + redisOptions = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + options->options |= REDIS_OPT_NO_PUSH_AUTOFREE; + redisContext *context = redisConnectWithOptions(&options); + ``` +3. Call `redisSetPushCallback` with `NULL` once connected. + ```c + redisContext *context = redisConnect("127.0.0.1", 6379); + redisSetPushCallback(context, NULL); + ``` + + _Note: With no handler configured, calls to `redisCommand` may generate more than one reply, so this strategy is only applicable when there's some kind of blocking`redisGetReply()` loop (e.g. `MONITOR` or `SUBSCRIBE` workloads)._ + +## Allocator injection + +Hiredis uses a pass-thru structure of function pointers defined in [alloc.h](https://github.com/redis/hiredis/blob/f5d25850/alloc.h#L41) that contain the currently configured allocation and deallocation functions. By default they just point to libc (`malloc`, `calloc`, `realloc`, etc). 
+ +### Overriding + +One can override the allocators like so: + +```c +hiredisAllocFuncs myfuncs = { + .mallocFn = my_malloc, + .callocFn = my_calloc, + .reallocFn = my_realloc, + .strdupFn = my_strdup, + .freeFn = my_free, +}; + +// Override allocators (function returns current allocators if needed) +hiredisAllocFuncs orig = hiredisSetAllocators(&myfuncs); +``` + +To reset the allocators to their default libc function simply call: + +```c +hiredisResetAllocators(); +``` + +## AUTHORS + +Salvatore Sanfilippo (antirez at gmail),\ +Pieter Noordhuis (pcnoordhuis at gmail)\ +Michael Grunder (michael dot grunder at gmail) + +_Hiredis is released under the BSD license._ diff --git a/deps/hiredis/adapters/ae.h b/deps/hiredis/adapters/ae.h new file mode 100644 index 0000000..660d82e --- /dev/null +++ b/deps/hiredis/adapters/ae.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_AE_H__ +#define __HIREDIS_AE_H__ +#include +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisAeEvents { + redisAsyncContext *context; + aeEventLoop *loop; + int fd; + int reading, writing; +} redisAeEvents; + +static void redisAeReadEvent(aeEventLoop *el, int fd, void *privdata, int mask) { + ((void)el); ((void)fd); ((void)mask); + + redisAeEvents *e = (redisAeEvents*)privdata; + redisAsyncHandleRead(e->context); +} + +static void redisAeWriteEvent(aeEventLoop *el, int fd, void *privdata, int mask) { + ((void)el); ((void)fd); ((void)mask); + + redisAeEvents *e = (redisAeEvents*)privdata; + redisAsyncHandleWrite(e->context); +} + +static void redisAeAddRead(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (!e->reading) { + e->reading = 1; + aeCreateFileEvent(loop,e->fd,AE_READABLE,redisAeReadEvent,e); + } +} + +static void redisAeDelRead(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (e->reading) { + e->reading = 0; + aeDeleteFileEvent(loop,e->fd,AE_READABLE); + } +} + +static void redisAeAddWrite(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (!e->writing) { + e->writing = 1; + aeCreateFileEvent(loop,e->fd,AE_WRITABLE,redisAeWriteEvent,e); + } +} + +static void redisAeDelWrite(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (e->writing) { + e->writing = 0; + aeDeleteFileEvent(loop,e->fd,AE_WRITABLE); + } +} + +static void redisAeCleanup(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + redisAeDelRead(privdata); + redisAeDelWrite(privdata); + hi_free(e); +} + +static int redisAeAttach(aeEventLoop *loop, redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisAeEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisAeEvents*)hi_malloc(sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + e->loop = loop; + e->fd = c->fd; + e->reading = e->writing = 0; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisAeAddRead; + ac->ev.delRead = redisAeDelRead; + ac->ev.addWrite = redisAeAddWrite; + ac->ev.delWrite = redisAeDelWrite; + ac->ev.cleanup = redisAeCleanup; + ac->ev.data = e; + + return REDIS_OK; +} +#endif diff --git a/deps/hiredis/adapters/glib.h b/deps/hiredis/adapters/glib.h new file mode 100644 index 0000000..ad59dd1 --- /dev/null +++ b/deps/hiredis/adapters/glib.h @@ -0,0 +1,156 @@ +#ifndef __HIREDIS_GLIB_H__ +#define __HIREDIS_GLIB_H__ + +#include + +#include "../hiredis.h" +#include "../async.h" + +typedef struct +{ + GSource source; + redisAsyncContext *ac; + GPollFD poll_fd; +} RedisSource; + +static void +redis_source_add_read (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events |= G_IO_IN; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_del_read (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events &= ~G_IO_IN; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_add_write (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events |= 
G_IO_OUT; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_del_write (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events &= ~G_IO_OUT; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_cleanup (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + + g_return_if_fail(source); + + redis_source_del_read(source); + redis_source_del_write(source); + /* + * It is not our responsibility to remove ourself from the + * current main loop. However, we will remove the GPollFD. + */ + if (source->poll_fd.fd >= 0) { + g_source_remove_poll((GSource *)data, &source->poll_fd); + source->poll_fd.fd = -1; + } +} + +static gboolean +redis_source_prepare (GSource *source, + gint *timeout_) +{ + RedisSource *redis = (RedisSource *)source; + *timeout_ = -1; + return !!(redis->poll_fd.events & redis->poll_fd.revents); +} + +static gboolean +redis_source_check (GSource *source) +{ + RedisSource *redis = (RedisSource *)source; + return !!(redis->poll_fd.events & redis->poll_fd.revents); +} + +static gboolean +redis_source_dispatch (GSource *source, + GSourceFunc callback, + gpointer user_data) +{ + RedisSource *redis = (RedisSource *)source; + + if ((redis->poll_fd.revents & G_IO_OUT)) { + redisAsyncHandleWrite(redis->ac); + redis->poll_fd.revents &= ~G_IO_OUT; + } + + if ((redis->poll_fd.revents & G_IO_IN)) { + redisAsyncHandleRead(redis->ac); + redis->poll_fd.revents &= ~G_IO_IN; + } + + if (callback) { + return callback(user_data); + } + + return TRUE; +} + +static void +redis_source_finalize (GSource *source) +{ + RedisSource *redis = (RedisSource *)source; + + if (redis->poll_fd.fd >= 0) { + g_source_remove_poll(source, &redis->poll_fd); + redis->poll_fd.fd = -1; + } +} + +static GSource * +redis_source_new (redisAsyncContext *ac) +{ + static GSourceFuncs source_funcs = { + .prepare = redis_source_prepare, + .check = redis_source_check, + .dispatch = redis_source_dispatch, + .finalize = redis_source_finalize, + }; + redisContext *c = &ac->c; + RedisSource *source; + + g_return_val_if_fail(ac != NULL, NULL); + + source = (RedisSource *)g_source_new(&source_funcs, sizeof *source); + if (source == NULL) + return NULL; + + source->ac = ac; + source->poll_fd.fd = c->fd; + source->poll_fd.events = 0; + source->poll_fd.revents = 0; + g_source_add_poll((GSource *)source, &source->poll_fd); + + ac->ev.addRead = redis_source_add_read; + ac->ev.delRead = redis_source_del_read; + ac->ev.addWrite = redis_source_add_write; + ac->ev.delWrite = redis_source_del_write; + ac->ev.cleanup = redis_source_cleanup; + ac->ev.data = source; + + return (GSource *)source; +} + +#endif /* __HIREDIS_GLIB_H__ */ diff --git a/deps/hiredis/adapters/ivykis.h b/deps/hiredis/adapters/ivykis.h new file mode 100644 index 0000000..179f6ab --- /dev/null +++ b/deps/hiredis/adapters/ivykis.h @@ -0,0 +1,84 @@ +#ifndef __HIREDIS_IVYKIS_H__ +#define __HIREDIS_IVYKIS_H__ +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisIvykisEvents { + redisAsyncContext *context; + struct iv_fd fd; +} redisIvykisEvents; + +static void redisIvykisReadEvent(void *arg) { + redisAsyncContext *context = (redisAsyncContext *)arg; + redisAsyncHandleRead(context); +} + +static void redisIvykisWriteEvent(void *arg) { + redisAsyncContext *context = (redisAsyncContext *)arg; + redisAsyncHandleWrite(context); +} + +static void redisIvykisAddRead(void *privdata) { + redisIvykisEvents *e 
= (redisIvykisEvents*)privdata; + iv_fd_set_handler_in(&e->fd, redisIvykisReadEvent); +} + +static void redisIvykisDelRead(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_in(&e->fd, NULL); +} + +static void redisIvykisAddWrite(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_out(&e->fd, redisIvykisWriteEvent); +} + +static void redisIvykisDelWrite(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_out(&e->fd, NULL); +} + +static void redisIvykisCleanup(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + + iv_fd_unregister(&e->fd); + hi_free(e); +} + +static int redisIvykisAttach(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisIvykisEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisIvykisEvents*)hi_malloc(sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisIvykisAddRead; + ac->ev.delRead = redisIvykisDelRead; + ac->ev.addWrite = redisIvykisAddWrite; + ac->ev.delWrite = redisIvykisDelWrite; + ac->ev.cleanup = redisIvykisCleanup; + ac->ev.data = e; + + /* Initialize and install read/write events */ + IV_FD_INIT(&e->fd); + e->fd.fd = c->fd; + e->fd.handler_in = redisIvykisReadEvent; + e->fd.handler_out = redisIvykisWriteEvent; + e->fd.handler_err = NULL; + e->fd.cookie = e->context; + + iv_fd_register(&e->fd); + + return REDIS_OK; +} +#endif diff --git a/deps/hiredis/adapters/libev.h b/deps/hiredis/adapters/libev.h new file mode 100644 index 0000000..c59d3da --- /dev/null +++ b/deps/hiredis/adapters/libev.h @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_LIBEV_H__ +#define __HIREDIS_LIBEV_H__ +#include +#include +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisLibevEvents { + redisAsyncContext *context; + struct ev_loop *loop; + int reading, writing; + ev_io rev, wev; + ev_timer timer; +} redisLibevEvents; + +static void redisLibevReadEvent(EV_P_ ev_io *watcher, int revents) { +#if EV_MULTIPLICITY + ((void)EV_A); +#endif + ((void)revents); + + redisLibevEvents *e = (redisLibevEvents*)watcher->data; + redisAsyncHandleRead(e->context); +} + +static void redisLibevWriteEvent(EV_P_ ev_io *watcher, int revents) { +#if EV_MULTIPLICITY + ((void)EV_A); +#endif + ((void)revents); + + redisLibevEvents *e = (redisLibevEvents*)watcher->data; + redisAsyncHandleWrite(e->context); +} + +static void redisLibevAddRead(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + if (!e->reading) { + e->reading = 1; + ev_io_start(EV_A_ &e->rev); + } +} + +static void redisLibevDelRead(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + if (e->reading) { + e->reading = 0; + ev_io_stop(EV_A_ &e->rev); + } +} + +static void redisLibevAddWrite(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + if (!e->writing) { + e->writing = 1; + ev_io_start(EV_A_ &e->wev); + } +} + +static void redisLibevDelWrite(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + if (e->writing) { + e->writing = 0; + ev_io_stop(EV_A_ &e->wev); + } +} + +static void redisLibevStopTimer(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + ev_timer_stop(EV_A_ &e->timer); +} + +static void redisLibevCleanup(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + redisLibevDelRead(privdata); + redisLibevDelWrite(privdata); + redisLibevStopTimer(privdata); + hi_free(e); +} + +static void redisLibevTimeout(EV_P_ ev_timer *timer, int revents) { +#if EV_MULTIPLICITY + ((void)EV_A); +#endif + ((void)revents); + redisLibevEvents *e = (redisLibevEvents*)timer->data; + redisAsyncHandleTimeout(e->context); +} + +static void redisLibevSetTimeout(void *privdata, struct timeval tv) { + redisLibevEvents *e = (redisLibevEvents*)privdata; +#if EV_MULTIPLICITY + struct ev_loop *loop = e->loop; +#endif + + if (!ev_is_active(&e->timer)) { + ev_init(&e->timer, redisLibevTimeout); + e->timer.data = e; + } + + e->timer.repeat = tv.tv_sec + tv.tv_usec / 1000000.00; + ev_timer_again(EV_A_ &e->timer); +} + +static int redisLibevAttach(EV_P_ redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisLibevEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisLibevEvents*)hi_calloc(1, sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; +#if EV_MULTIPLICITY + e->loop = EV_A; +#else + e->loop = NULL; +#endif + e->rev.data = e; + e->wev.data = e; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisLibevAddRead; + ac->ev.delRead = redisLibevDelRead; + ac->ev.addWrite = redisLibevAddWrite; + ac->ev.delWrite = redisLibevDelWrite; + ac->ev.cleanup = 
redisLibevCleanup; + ac->ev.scheduleTimer = redisLibevSetTimeout; + ac->ev.data = e; + + /* Initialize read/write events */ + ev_io_init(&e->rev,redisLibevReadEvent,c->fd,EV_READ); + ev_io_init(&e->wev,redisLibevWriteEvent,c->fd,EV_WRITE); + return REDIS_OK; +} + +#endif diff --git a/deps/hiredis/adapters/libevent.h b/deps/hiredis/adapters/libevent.h new file mode 100644 index 0000000..73bb8ed --- /dev/null +++ b/deps/hiredis/adapters/libevent.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_LIBEVENT_H__ +#define __HIREDIS_LIBEVENT_H__ +#include +#include "../hiredis.h" +#include "../async.h" + +#define REDIS_LIBEVENT_DELETED 0x01 +#define REDIS_LIBEVENT_ENTERED 0x02 + +typedef struct redisLibeventEvents { + redisAsyncContext *context; + struct event *ev; + struct event_base *base; + struct timeval tv; + short flags; + short state; +} redisLibeventEvents; + +static void redisLibeventDestroy(redisLibeventEvents *e) { + hi_free(e); +} + +static void redisLibeventHandler(evutil_socket_t fd, short event, void *arg) { + ((void)fd); + redisLibeventEvents *e = (redisLibeventEvents*)arg; + e->state |= REDIS_LIBEVENT_ENTERED; + + #define CHECK_DELETED() if (e->state & REDIS_LIBEVENT_DELETED) {\ + redisLibeventDestroy(e);\ + return; \ + } + + if ((event & EV_TIMEOUT) && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleTimeout(e->context); + CHECK_DELETED(); + } + + if ((event & EV_READ) && e->context && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleRead(e->context); + CHECK_DELETED(); + } + + if ((event & EV_WRITE) && e->context && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleWrite(e->context); + CHECK_DELETED(); + } + + e->state &= ~REDIS_LIBEVENT_ENTERED; + #undef CHECK_DELETED +} + +static void redisLibeventUpdate(void *privdata, short flag, int isRemove) { + redisLibeventEvents *e = (redisLibeventEvents *)privdata; + const struct timeval *tv = e->tv.tv_sec || e->tv.tv_usec ? 
&e->tv : NULL; + + if (isRemove) { + if ((e->flags & flag) == 0) { + return; + } else { + e->flags &= ~flag; + } + } else { + if (e->flags & flag) { + return; + } else { + e->flags |= flag; + } + } + + event_del(e->ev); + event_assign(e->ev, e->base, e->context->c.fd, e->flags | EV_PERSIST, + redisLibeventHandler, privdata); + event_add(e->ev, tv); +} + +static void redisLibeventAddRead(void *privdata) { + redisLibeventUpdate(privdata, EV_READ, 0); +} + +static void redisLibeventDelRead(void *privdata) { + redisLibeventUpdate(privdata, EV_READ, 1); +} + +static void redisLibeventAddWrite(void *privdata) { + redisLibeventUpdate(privdata, EV_WRITE, 0); +} + +static void redisLibeventDelWrite(void *privdata) { + redisLibeventUpdate(privdata, EV_WRITE, 1); +} + +static void redisLibeventCleanup(void *privdata) { + redisLibeventEvents *e = (redisLibeventEvents*)privdata; + if (!e) { + return; + } + event_del(e->ev); + event_free(e->ev); + e->ev = NULL; + + if (e->state & REDIS_LIBEVENT_ENTERED) { + e->state |= REDIS_LIBEVENT_DELETED; + } else { + redisLibeventDestroy(e); + } +} + +static void redisLibeventSetTimeout(void *privdata, struct timeval tv) { + redisLibeventEvents *e = (redisLibeventEvents *)privdata; + short flags = e->flags; + e->flags = 0; + e->tv = tv; + redisLibeventUpdate(e, flags, 0); +} + +static int redisLibeventAttach(redisAsyncContext *ac, struct event_base *base) { + redisContext *c = &(ac->c); + redisLibeventEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisLibeventEvents*)hi_calloc(1, sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisLibeventAddRead; + ac->ev.delRead = redisLibeventDelRead; + ac->ev.addWrite = redisLibeventAddWrite; + ac->ev.delWrite = redisLibeventDelWrite; + ac->ev.cleanup = redisLibeventCleanup; + ac->ev.scheduleTimer = redisLibeventSetTimeout; + ac->ev.data = e; + + /* Initialize and install read/write events */ + e->ev = event_new(base, c->fd, EV_READ | EV_WRITE, redisLibeventHandler, e); + e->base = base; + return REDIS_OK; +} +#endif diff --git a/deps/hiredis/adapters/libuv.h b/deps/hiredis/adapters/libuv.h new file mode 100644 index 0000000..df0a845 --- /dev/null +++ b/deps/hiredis/adapters/libuv.h @@ -0,0 +1,163 @@ +#ifndef __HIREDIS_LIBUV_H__ +#define __HIREDIS_LIBUV_H__ +#include +#include +#include "../hiredis.h" +#include "../async.h" +#include + +typedef struct redisLibuvEvents { + redisAsyncContext* context; + uv_poll_t handle; + uv_timer_t timer; + int events; +} redisLibuvEvents; + + +static void redisLibuvPoll(uv_poll_t* handle, int status, int events) { + redisLibuvEvents* p = (redisLibuvEvents*)handle->data; + int ev = (status ? 
p->events : events); + + if (p->context != NULL && (ev & UV_READABLE)) { + redisAsyncHandleRead(p->context); + } + if (p->context != NULL && (ev & UV_WRITABLE)) { + redisAsyncHandleWrite(p->context); + } +} + + +static void redisLibuvAddRead(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events |= UV_READABLE; + + uv_poll_start(&p->handle, p->events, redisLibuvPoll); +} + + +static void redisLibuvDelRead(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events &= ~UV_READABLE; + + if (p->events) { + uv_poll_start(&p->handle, p->events, redisLibuvPoll); + } else { + uv_poll_stop(&p->handle); + } +} + + +static void redisLibuvAddWrite(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events |= UV_WRITABLE; + + uv_poll_start(&p->handle, p->events, redisLibuvPoll); +} + + +static void redisLibuvDelWrite(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events &= ~UV_WRITABLE; + + if (p->events) { + uv_poll_start(&p->handle, p->events, redisLibuvPoll); + } else { + uv_poll_stop(&p->handle); + } +} + +static void on_timer_close(uv_handle_t *handle) { + redisLibuvEvents* p = (redisLibuvEvents*)handle->data; + p->timer.data = NULL; + if (!p->handle.data) { + // both timer and handle are closed + hi_free(p); + } + // else, wait for `on_handle_close` +} + +static void on_handle_close(uv_handle_t *handle) { + redisLibuvEvents* p = (redisLibuvEvents*)handle->data; + p->handle.data = NULL; + if (!p->timer.data) { + // timer never started, or timer already destroyed + hi_free(p); + } + // else, wait for `on_timer_close` +} + +// libuv removed `status` parameter since v0.11.23 +// see: https://github.com/libuv/libuv/blob/v0.11.23/include/uv.h +#if (UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR < 11) || \ + (UV_VERSION_MAJOR == 0 && UV_VERSION_MINOR == 11 && UV_VERSION_PATCH < 23) +static void redisLibuvTimeout(uv_timer_t *timer, int status) { + (void)status; // unused +#else +static void redisLibuvTimeout(uv_timer_t *timer) { +#endif + redisLibuvEvents *e = (redisLibuvEvents*)timer->data; + redisAsyncHandleTimeout(e->context); +} + +static void redisLibuvSetTimeout(void *privdata, struct timeval tv) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + uint64_t millsec = tv.tv_sec * 1000 + tv.tv_usec / 1000.0; + if (!p->timer.data) { + // timer is uninitialized + if (uv_timer_init(p->handle.loop, &p->timer) != 0) { + return; + } + p->timer.data = p; + } + // updates the timeout if the timer has already started + // or start the timer + uv_timer_start(&p->timer, redisLibuvTimeout, millsec, 0); +} + +static void redisLibuvCleanup(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->context = NULL; // indicate that context might no longer exist + if (p->timer.data) { + uv_close((uv_handle_t*)&p->timer, on_timer_close); + } + uv_close((uv_handle_t*)&p->handle, on_handle_close); +} + + +static int redisLibuvAttach(redisAsyncContext* ac, uv_loop_t* loop) { + redisContext *c = &(ac->c); + + if (ac->ev.data != NULL) { + return REDIS_ERR; + } + + ac->ev.addRead = redisLibuvAddRead; + ac->ev.delRead = redisLibuvDelRead; + ac->ev.addWrite = redisLibuvAddWrite; + ac->ev.delWrite = redisLibuvDelWrite; + ac->ev.cleanup = redisLibuvCleanup; + ac->ev.scheduleTimer = redisLibuvSetTimeout; + + redisLibuvEvents* p = (redisLibuvEvents*)hi_malloc(sizeof(*p)); + if (p == NULL) + return REDIS_ERR; + + memset(p, 0, sizeof(*p)); + + if (uv_poll_init_socket(loop, &p->handle, c->fd) != 0) { + 
return REDIS_ERR; + } + + ac->ev.data = p; + p->handle.data = p; + p->context = ac; + + return REDIS_OK; +} +#endif diff --git a/deps/hiredis/adapters/macosx.h b/deps/hiredis/adapters/macosx.h new file mode 100644 index 0000000..3c87f1b --- /dev/null +++ b/deps/hiredis/adapters/macosx.h @@ -0,0 +1,115 @@ +// +// Created by Дмитрий Бахвалов on 13.07.15. +// Copyright (c) 2015 Dmitry Bakhvalov. All rights reserved. +// + +#ifndef __HIREDIS_MACOSX_H__ +#define __HIREDIS_MACOSX_H__ + +#include + +#include "../hiredis.h" +#include "../async.h" + +typedef struct { + redisAsyncContext *context; + CFSocketRef socketRef; + CFRunLoopSourceRef sourceRef; +} RedisRunLoop; + +static int freeRedisRunLoop(RedisRunLoop* redisRunLoop) { + if( redisRunLoop != NULL ) { + if( redisRunLoop->sourceRef != NULL ) { + CFRunLoopSourceInvalidate(redisRunLoop->sourceRef); + CFRelease(redisRunLoop->sourceRef); + } + if( redisRunLoop->socketRef != NULL ) { + CFSocketInvalidate(redisRunLoop->socketRef); + CFRelease(redisRunLoop->socketRef); + } + hi_free(redisRunLoop); + } + return REDIS_ERR; +} + +static void redisMacOSAddRead(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketEnableCallBacks(redisRunLoop->socketRef, kCFSocketReadCallBack); +} + +static void redisMacOSDelRead(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketDisableCallBacks(redisRunLoop->socketRef, kCFSocketReadCallBack); +} + +static void redisMacOSAddWrite(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketEnableCallBacks(redisRunLoop->socketRef, kCFSocketWriteCallBack); +} + +static void redisMacOSDelWrite(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketDisableCallBacks(redisRunLoop->socketRef, kCFSocketWriteCallBack); +} + +static void redisMacOSCleanup(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + freeRedisRunLoop(redisRunLoop); +} + +static void redisMacOSAsyncCallback(CFSocketRef __unused s, CFSocketCallBackType callbackType, CFDataRef __unused address, const void __unused *data, void *info) { + redisAsyncContext* context = (redisAsyncContext*) info; + + switch (callbackType) { + case kCFSocketReadCallBack: + redisAsyncHandleRead(context); + break; + + case kCFSocketWriteCallBack: + redisAsyncHandleWrite(context); + break; + + default: + break; + } +} + +static int redisMacOSAttach(redisAsyncContext *redisAsyncCtx, CFRunLoopRef runLoop) { + redisContext *redisCtx = &(redisAsyncCtx->c); + + /* Nothing should be attached when something is already attached */ + if( redisAsyncCtx->ev.data != NULL ) return REDIS_ERR; + + RedisRunLoop* redisRunLoop = (RedisRunLoop*) hi_calloc(1, sizeof(RedisRunLoop)); + if (redisRunLoop == NULL) + return REDIS_ERR; + + /* Setup redis stuff */ + redisRunLoop->context = redisAsyncCtx; + + redisAsyncCtx->ev.addRead = redisMacOSAddRead; + redisAsyncCtx->ev.delRead = redisMacOSDelRead; + redisAsyncCtx->ev.addWrite = redisMacOSAddWrite; + redisAsyncCtx->ev.delWrite = redisMacOSDelWrite; + redisAsyncCtx->ev.cleanup = redisMacOSCleanup; + redisAsyncCtx->ev.data = redisRunLoop; + + /* Initialize and install read/write events */ + CFSocketContext socketCtx = { 0, redisAsyncCtx, NULL, NULL, NULL }; + + redisRunLoop->socketRef = CFSocketCreateWithNative(NULL, redisCtx->fd, + kCFSocketReadCallBack | kCFSocketWriteCallBack, + redisMacOSAsyncCallback, + &socketCtx); + if( !redisRunLoop->socketRef ) return freeRedisRunLoop(redisRunLoop); + + redisRunLoop->sourceRef = 
CFSocketCreateRunLoopSource(NULL, redisRunLoop->socketRef, 0); + if( !redisRunLoop->sourceRef ) return freeRedisRunLoop(redisRunLoop); + + CFRunLoopAddSource(runLoop, redisRunLoop->sourceRef, kCFRunLoopDefaultMode); + + return REDIS_OK; +} + +#endif + diff --git a/deps/hiredis/adapters/qt.h b/deps/hiredis/adapters/qt.h new file mode 100644 index 0000000..5cc02e6 --- /dev/null +++ b/deps/hiredis/adapters/qt.h @@ -0,0 +1,135 @@ +/*- + * Copyright (C) 2014 Pietro Cerutti + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef __HIREDIS_QT_H__ +#define __HIREDIS_QT_H__ +#include +#include "../async.h" + +static void RedisQtAddRead(void *); +static void RedisQtDelRead(void *); +static void RedisQtAddWrite(void *); +static void RedisQtDelWrite(void *); +static void RedisQtCleanup(void *); + +class RedisQtAdapter : public QObject { + + Q_OBJECT + + friend + void RedisQtAddRead(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->addRead(); + } + + friend + void RedisQtDelRead(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->delRead(); + } + + friend + void RedisQtAddWrite(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->addWrite(); + } + + friend + void RedisQtDelWrite(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->delWrite(); + } + + friend + void RedisQtCleanup(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->cleanup(); + } + + public: + RedisQtAdapter(QObject * parent = 0) + : QObject(parent), m_ctx(0), m_read(0), m_write(0) { } + + ~RedisQtAdapter() { + if (m_ctx != 0) { + m_ctx->ev.data = NULL; + } + } + + int setContext(redisAsyncContext * ac) { + if (ac->ev.data != NULL) { + return REDIS_ERR; + } + m_ctx = ac; + m_ctx->ev.data = this; + m_ctx->ev.addRead = RedisQtAddRead; + m_ctx->ev.delRead = RedisQtDelRead; + m_ctx->ev.addWrite = RedisQtAddWrite; + m_ctx->ev.delWrite = RedisQtDelWrite; + m_ctx->ev.cleanup = RedisQtCleanup; + return REDIS_OK; + } + + private: + void addRead() { + if (m_read) return; + m_read = new QSocketNotifier(m_ctx->c.fd, QSocketNotifier::Read, 0); + connect(m_read, SIGNAL(activated(int)), this, SLOT(read())); + } + + void delRead() { + if (!m_read) return; + delete m_read; + m_read = 0; + } + + void addWrite() { + if (m_write) return; + m_write = new 
QSocketNotifier(m_ctx->c.fd, QSocketNotifier::Write, 0); + connect(m_write, SIGNAL(activated(int)), this, SLOT(write())); + } + + void delWrite() { + if (!m_write) return; + delete m_write; + m_write = 0; + } + + void cleanup() { + delRead(); + delWrite(); + } + + private slots: + void read() { redisAsyncHandleRead(m_ctx); } + void write() { redisAsyncHandleWrite(m_ctx); } + + private: + redisAsyncContext * m_ctx; + QSocketNotifier * m_read; + QSocketNotifier * m_write; +}; + +#endif /* !__HIREDIS_QT_H__ */ diff --git a/deps/hiredis/alloc.c b/deps/hiredis/alloc.c new file mode 100644 index 0000000..0902286 --- /dev/null +++ b/deps/hiredis/alloc.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
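The four event-loop adapters above (libevent, libuv, CFRunLoop, Qt) all follow the same contract: attach exactly once per context (each returns REDIS_ERR if ac->ev.data is already set), point ac->ev.addRead/delRead/addWrite/delWrite/cleanup/scheduleTimer at the host loop, and let the loop's callbacks drive redisAsyncHandleRead/redisAsyncHandleWrite. As a rough illustration only (not part of this patch), a minimal libevent program might look like the sketch below; the host, port and the <hiredis/...> install paths are assumptions about a system-wide install.

    #include <stdio.h>
    #include <event2/event.h>
    #include <hiredis/hiredis.h>
    #include <hiredis/async.h>
    #include <hiredis/adapters/libevent.h>

    static void onConnect(const redisAsyncContext *ac, int status) {
        printf("connect: %s\n", status == REDIS_OK ? "ok" : ac->errstr);
    }

    static void onDisconnect(const redisAsyncContext *ac, int status) {
        printf("disconnect: %s\n", status == REDIS_OK ? "clean" : ac->errstr);
    }

    int main(void) {
        struct event_base *base = event_base_new();
        redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);
        if (ac == NULL || ac->err) return 1;

        /* Installs the ac->ev hooks defined by the libevent adapter. */
        if (redisLibeventAttach(ac, base) != REDIS_OK) return 1;
        redisAsyncSetConnectCallback(ac, onConnect);
        redisAsyncSetDisconnectCallback(ac, onDisconnect);

        /* Commands would be issued here (see the command sketch further down). */
        event_base_dispatch(base);   /* runs until the connection goes away */
        event_base_free(base);
        return 0;
    }

The libuv, macOS and Qt variants differ only in the attach call (redisLibuvAttach, redisMacOSAttach, RedisQtAdapter::setContext) and in which native loop object they take.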
+ */ + +#include "fmacros.h" +#include "alloc.h" +#include +#include + +hiredisAllocFuncs hiredisAllocFns = { + .mallocFn = malloc, + .callocFn = calloc, + .reallocFn = realloc, + .strdupFn = strdup, + .freeFn = free, +}; + +/* Override hiredis' allocators with ones supplied by the user */ +hiredisAllocFuncs hiredisSetAllocators(hiredisAllocFuncs *override) { + hiredisAllocFuncs orig = hiredisAllocFns; + + hiredisAllocFns = *override; + + return orig; +} + +/* Reset allocators to use libc defaults */ +void hiredisResetAllocators(void) { + hiredisAllocFns = (hiredisAllocFuncs) { + .mallocFn = malloc, + .callocFn = calloc, + .reallocFn = realloc, + .strdupFn = strdup, + .freeFn = free, + }; +} + +#ifdef _WIN32 + +void *hi_malloc(size_t size) { + return hiredisAllocFns.mallocFn(size); +} + +void *hi_calloc(size_t nmemb, size_t size) { + /* Overflow check as the user can specify any arbitrary allocator */ + if (SIZE_MAX / size < nmemb) + return NULL; + + return hiredisAllocFns.callocFn(nmemb, size); +} + +void *hi_realloc(void *ptr, size_t size) { + return hiredisAllocFns.reallocFn(ptr, size); +} + +char *hi_strdup(const char *str) { + return hiredisAllocFns.strdupFn(str); +} + +void hi_free(void *ptr) { + hiredisAllocFns.freeFn(ptr); +} + +#endif diff --git a/deps/hiredis/alloc.h b/deps/hiredis/alloc.h new file mode 100644 index 0000000..771f9fe --- /dev/null +++ b/deps/hiredis/alloc.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
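hiredisSetAllocators above swaps every internal hiredis allocation through a user-supplied hiredisAllocFuncs table and returns the previous table; hiredisResetAllocators restores the libc defaults. A small sketch of plugging in a tracing malloc (the wrapper name and the <hiredis/alloc.h> install path are assumptions, not part of this patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <hiredis/alloc.h>

    static void *traceMalloc(size_t size) {
        fprintf(stderr, "hiredis alloc: %zu bytes\n", size);
        return malloc(size);
    }

    int main(void) {
        hiredisAllocFuncs fns = {
            .mallocFn  = traceMalloc,
            .callocFn  = calloc,
            .reallocFn = realloc,
            .strdupFn  = strdup,
            .freeFn    = free,
        };
        hiredisAllocFuncs prev = hiredisSetAllocators(&fns); /* old table returned */

        /* ... use hiredis as usual; hi_malloc/hi_free now route through fns ... */

        hiredisSetAllocators(&prev);     /* or hiredisResetAllocators(); */
        return 0;
    }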
+ */ + +#ifndef HIREDIS_ALLOC_H +#define HIREDIS_ALLOC_H + +#include /* for size_t */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure pointing to our actually configured allocators */ +typedef struct hiredisAllocFuncs { + void *(*mallocFn)(size_t); + void *(*callocFn)(size_t,size_t); + void *(*reallocFn)(void*,size_t); + char *(*strdupFn)(const char*); + void (*freeFn)(void*); +} hiredisAllocFuncs; + +hiredisAllocFuncs hiredisSetAllocators(hiredisAllocFuncs *ha); +void hiredisResetAllocators(void); + +#ifndef _WIN32 + +/* Hiredis' configured allocator function pointer struct */ +extern hiredisAllocFuncs hiredisAllocFns; + +static inline void *hi_malloc(size_t size) { + return hiredisAllocFns.mallocFn(size); +} + +static inline void *hi_calloc(size_t nmemb, size_t size) { + /* Overflow check as the user can specify any arbitrary allocator */ + if (SIZE_MAX / size < nmemb) + return NULL; + + return hiredisAllocFns.callocFn(nmemb, size); +} + +static inline void *hi_realloc(void *ptr, size_t size) { + return hiredisAllocFns.reallocFn(ptr, size); +} + +static inline char *hi_strdup(const char *str) { + return hiredisAllocFns.strdupFn(str); +} + +static inline void hi_free(void *ptr) { + hiredisAllocFns.freeFn(ptr); +} + +#else + +void *hi_malloc(size_t size); +void *hi_calloc(size_t nmemb, size_t size); +void *hi_realloc(void *ptr, size_t size); +char *hi_strdup(const char *str); +void hi_free(void *ptr); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* HIREDIS_ALLOC_H */ diff --git a/deps/hiredis/appveyor.yml b/deps/hiredis/appveyor.yml new file mode 100644 index 0000000..5b43fdb --- /dev/null +++ b/deps/hiredis/appveyor.yml @@ -0,0 +1,24 @@ +# Appveyor configuration file for CI build of hiredis on Windows (under Cygwin) +environment: + matrix: + - CYG_BASH: C:\cygwin64\bin\bash + CC: gcc + - CYG_BASH: C:\cygwin\bin\bash + CC: gcc + CFLAGS: -m32 + CXXFLAGS: -m32 + LDFLAGS: -m32 + +clone_depth: 1 + +# Attempt to ensure we don't try to convert line endings to Win32 CRLF as this will cause build to fail +init: + - git config --global core.autocrlf input + +# Install needed build dependencies +install: + - '%CYG_BASH% -lc "cygcheck -dc cygwin"' + +build_script: + - 'echo building...' + - '%CYG_BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0 + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include "alloc.h" +#include +#include +#ifndef _MSC_VER +#include +#endif +#include +#include +#include +#include "async.h" +#include "net.h" +#include "dict.c" +#include "sds.h" +#include "win32.h" + +#include "async_private.h" + +#ifdef NDEBUG +#undef assert +#define assert(e) (void)(e) +#endif + +/* Forward declarations of hiredis.c functions */ +int __redisAppendCommand(redisContext *c, const char *cmd, size_t len); +void __redisSetError(redisContext *c, int type, const char *str); + +/* Functions managing dictionary of callbacks for pub/sub. */ +static unsigned int callbackHash(const void *key) { + return dictGenHashFunction((const unsigned char *)key, + hi_sdslen((const hisds)key)); +} + +static void *callbackValDup(void *privdata, const void *src) { + ((void) privdata); + redisCallback *dup; + + dup = hi_malloc(sizeof(*dup)); + if (dup == NULL) + return NULL; + + memcpy(dup,src,sizeof(*dup)); + return dup; +} + +static int callbackKeyCompare(void *privdata, const void *key1, const void *key2) { + int l1, l2; + ((void) privdata); + + l1 = hi_sdslen((const hisds)key1); + l2 = hi_sdslen((const hisds)key2); + if (l1 != l2) return 0; + return memcmp(key1,key2,l1) == 0; +} + +static void callbackKeyDestructor(void *privdata, void *key) { + ((void) privdata); + hi_sdsfree((hisds)key); +} + +static void callbackValDestructor(void *privdata, void *val) { + ((void) privdata); + hi_free(val); +} + +static dictType callbackDict = { + callbackHash, + NULL, + callbackValDup, + callbackKeyCompare, + callbackKeyDestructor, + callbackValDestructor +}; + +static redisAsyncContext *redisAsyncInitialize(redisContext *c) { + redisAsyncContext *ac; + dict *channels = NULL, *patterns = NULL; + + channels = dictCreate(&callbackDict,NULL); + if (channels == NULL) + goto oom; + + patterns = dictCreate(&callbackDict,NULL); + if (patterns == NULL) + goto oom; + + ac = hi_realloc(c,sizeof(redisAsyncContext)); + if (ac == NULL) + goto oom; + + c = &(ac->c); + + /* The regular connect functions will always set the flag REDIS_CONNECTED. + * For the async API, we want to wait until the first write event is + * received up before setting this flag, so reset it here. */ + c->flags &= ~REDIS_CONNECTED; + + ac->err = 0; + ac->errstr = NULL; + ac->data = NULL; + ac->dataCleanup = NULL; + + ac->ev.data = NULL; + ac->ev.addRead = NULL; + ac->ev.delRead = NULL; + ac->ev.addWrite = NULL; + ac->ev.delWrite = NULL; + ac->ev.cleanup = NULL; + ac->ev.scheduleTimer = NULL; + + ac->onConnect = NULL; + ac->onDisconnect = NULL; + + ac->replies.head = NULL; + ac->replies.tail = NULL; + ac->sub.replies.head = NULL; + ac->sub.replies.tail = NULL; + ac->sub.channels = channels; + ac->sub.patterns = patterns; + + return ac; +oom: + if (channels) dictRelease(channels); + if (patterns) dictRelease(patterns); + return NULL; +} + +/* We want the error field to be accessible directly instead of requiring + * an indirection to the redisContext struct. 
*/ +static void __redisAsyncCopyError(redisAsyncContext *ac) { + if (!ac) + return; + + redisContext *c = &(ac->c); + ac->err = c->err; + ac->errstr = c->errstr; +} + +redisAsyncContext *redisAsyncConnectWithOptions(const redisOptions *options) { + redisOptions myOptions = *options; + redisContext *c; + redisAsyncContext *ac; + + /* Clear any erroneously set sync callback and flag that we don't want to + * use freeReplyObject by default. */ + myOptions.push_cb = NULL; + myOptions.options |= REDIS_OPT_NO_PUSH_AUTOFREE; + + myOptions.options |= REDIS_OPT_NONBLOCK; + c = redisConnectWithOptions(&myOptions); + if (c == NULL) { + return NULL; + } + + ac = redisAsyncInitialize(c); + if (ac == NULL) { + redisFree(c); + return NULL; + } + + /* Set any configured async push handler */ + redisAsyncSetPushCallback(ac, myOptions.async_push_cb); + + __redisAsyncCopyError(ac); + return ac; +} + +redisAsyncContext *redisAsyncConnect(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.options |= REDIS_OPT_REUSEADDR; + options.endpoint.tcp.source_addr = source_addr; + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectUnix(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + return redisAsyncConnectWithOptions(&options); +} + +int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn) { + if (ac->onConnect == NULL) { + ac->onConnect = fn; + + /* The common way to detect an established connection is to wait for + * the first write event to be fired. This assumes the related event + * library functions are already set. 
*/ + _EL_ADD_WRITE(ac); + return REDIS_OK; + } + return REDIS_ERR; +} + +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn) { + if (ac->onDisconnect == NULL) { + ac->onDisconnect = fn; + return REDIS_OK; + } + return REDIS_ERR; +} + +/* Helper functions to push/shift callbacks */ +static int __redisPushCallback(redisCallbackList *list, redisCallback *source) { + redisCallback *cb; + + /* Copy callback from stack to heap */ + cb = hi_malloc(sizeof(*cb)); + if (cb == NULL) + return REDIS_ERR_OOM; + + if (source != NULL) { + memcpy(cb,source,sizeof(*cb)); + cb->next = NULL; + } + + /* Store callback in list */ + if (list->head == NULL) + list->head = cb; + if (list->tail != NULL) + list->tail->next = cb; + list->tail = cb; + return REDIS_OK; +} + +static int __redisShiftCallback(redisCallbackList *list, redisCallback *target) { + redisCallback *cb = list->head; + if (cb != NULL) { + list->head = cb->next; + if (cb == list->tail) + list->tail = NULL; + + /* Copy callback from heap to stack */ + if (target != NULL) + memcpy(target,cb,sizeof(*cb)); + hi_free(cb); + return REDIS_OK; + } + return REDIS_ERR; +} + +static void __redisRunCallback(redisAsyncContext *ac, redisCallback *cb, redisReply *reply) { + redisContext *c = &(ac->c); + if (cb->fn != NULL) { + c->flags |= REDIS_IN_CALLBACK; + cb->fn(ac,reply,cb->privdata); + c->flags &= ~REDIS_IN_CALLBACK; + } +} + +static void __redisRunPushCallback(redisAsyncContext *ac, redisReply *reply) { + if (ac->push_cb != NULL) { + ac->c.flags |= REDIS_IN_CALLBACK; + ac->push_cb(ac, reply); + ac->c.flags &= ~REDIS_IN_CALLBACK; + } +} + +/* Helper function to free the context. */ +static void __redisAsyncFree(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisCallback cb; + dictIterator it; + dictEntry *de; + + /* Execute pending callbacks with NULL reply. */ + while (__redisShiftCallback(&ac->replies,&cb) == REDIS_OK) + __redisRunCallback(ac,&cb,NULL); + while (__redisShiftCallback(&ac->sub.replies,&cb) == REDIS_OK) + __redisRunCallback(ac,&cb,NULL); + + /* Run subscription callbacks with NULL reply */ + if (ac->sub.channels) { + dictInitIterator(&it,ac->sub.channels); + while ((de = dictNext(&it)) != NULL) + __redisRunCallback(ac,dictGetEntryVal(de),NULL); + + dictRelease(ac->sub.channels); + } + + if (ac->sub.patterns) { + dictInitIterator(&it,ac->sub.patterns); + while ((de = dictNext(&it)) != NULL) + __redisRunCallback(ac,dictGetEntryVal(de),NULL); + + dictRelease(ac->sub.patterns); + } + + /* Signal event lib to clean up */ + _EL_CLEANUP(ac); + + /* Execute disconnect callback. When redisAsyncFree() initiated destroying + * this context, the status will always be REDIS_OK. */ + if (ac->onDisconnect && (c->flags & REDIS_CONNECTED)) { + if (c->flags & REDIS_FREEING) { + ac->onDisconnect(ac,REDIS_OK); + } else { + ac->onDisconnect(ac,(ac->err == 0) ? REDIS_OK : REDIS_ERR); + } + } + + if (ac->dataCleanup) { + ac->dataCleanup(ac->data); + } + + /* Cleanup self */ + redisFree(c); +} + +/* Free the async context. When this function is called from a callback, + * control needs to be returned to redisProcessCallbacks() before actual + * free'ing. To do so, a flag is set on the context which is picked up by + * redisProcessCallbacks(). Otherwise, the context is immediately free'd. 
*/ +void redisAsyncFree(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + c->flags |= REDIS_FREEING; + if (!(c->flags & REDIS_IN_CALLBACK)) + __redisAsyncFree(ac); +} + +/* Helper function to make the disconnect happen and clean up. */ +void __redisAsyncDisconnect(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + /* Make sure error is accessible if there is any */ + __redisAsyncCopyError(ac); + + if (ac->err == 0) { + /* For clean disconnects, there should be no pending callbacks. */ + int ret = __redisShiftCallback(&ac->replies,NULL); + assert(ret == REDIS_ERR); + } else { + /* Disconnection is caused by an error, make sure that pending + * callbacks cannot call new commands. */ + c->flags |= REDIS_DISCONNECTING; + } + + /* cleanup event library on disconnect. + * this is safe to call multiple times */ + _EL_CLEANUP(ac); + + /* For non-clean disconnects, __redisAsyncFree() will execute pending + * callbacks with a NULL-reply. */ + if (!(c->flags & REDIS_NO_AUTO_FREE)) { + __redisAsyncFree(ac); + } +} + +/* Tries to do a clean disconnect from Redis, meaning it stops new commands + * from being issued, but tries to flush the output buffer and execute + * callbacks for all remaining replies. When this function is called from a + * callback, there might be more replies and we can safely defer disconnecting + * to redisProcessCallbacks(). Otherwise, we can only disconnect immediately + * when there are no pending callbacks. */ +void redisAsyncDisconnect(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + c->flags |= REDIS_DISCONNECTING; + + /** unset the auto-free flag here, because disconnect undoes this */ + c->flags &= ~REDIS_NO_AUTO_FREE; + if (!(c->flags & REDIS_IN_CALLBACK) && ac->replies.head == NULL) + __redisAsyncDisconnect(ac); +} + +static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply, redisCallback *dstcb) { + redisContext *c = &(ac->c); + dict *callbacks; + redisCallback *cb; + dictEntry *de; + int pvariant; + char *stype; + hisds sname; + + /* Match reply with the expected format of a pushed message. + * The type and number of elements (3 to 4) are specified at: + * https://redis.io/topics/pubsub#format-of-pushed-messages */ + if ((reply->type == REDIS_REPLY_ARRAY && !(c->flags & REDIS_SUPPORTS_PUSH) && reply->elements >= 3) || + reply->type == REDIS_REPLY_PUSH) { + assert(reply->element[0]->type == REDIS_REPLY_STRING); + stype = reply->element[0]->str; + pvariant = (tolower(stype[0]) == 'p') ? 1 : 0; + + if (pvariant) + callbacks = ac->sub.patterns; + else + callbacks = ac->sub.channels; + + /* Locate the right callback */ + assert(reply->element[1]->type == REDIS_REPLY_STRING); + sname = hi_sdsnewlen(reply->element[1]->str,reply->element[1]->len); + if (sname == NULL) + goto oom; + + de = dictFind(callbacks,sname); + if (de != NULL) { + cb = dictGetEntryVal(de); + + /* If this is an subscribe reply decrease pending counter. */ + if (strcasecmp(stype+pvariant,"subscribe") == 0) { + cb->pending_subs -= 1; + } + + memcpy(dstcb,cb,sizeof(*dstcb)); + + /* If this is an unsubscribe message, remove it. */ + if (strcasecmp(stype+pvariant,"unsubscribe") == 0) { + if (cb->pending_subs == 0) + dictDelete(callbacks,sname); + + /* If this was the last unsubscribe message, revert to + * non-subscribe mode. */ + assert(reply->element[2]->type == REDIS_REPLY_INTEGER); + + /* Unset subscribed flag only when no pipelined pending subscribe. 
*/ + if (reply->element[2]->integer == 0 + && dictSize(ac->sub.channels) == 0 + && dictSize(ac->sub.patterns) == 0) { + c->flags &= ~REDIS_SUBSCRIBED; + + /* Move ongoing regular command callbacks. */ + redisCallback cb; + while (__redisShiftCallback(&ac->sub.replies,&cb) == REDIS_OK) { + __redisPushCallback(&ac->replies,&cb); + } + } + } + } + hi_sdsfree(sname); + } else { + /* Shift callback for pending command in subscribed context. */ + __redisShiftCallback(&ac->sub.replies,dstcb); + } + return REDIS_OK; +oom: + __redisSetError(&(ac->c), REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} + +#define redisIsSpontaneousPushReply(r) \ + (redisIsPushReply(r) && !redisIsSubscribeReply(r)) + +static int redisIsSubscribeReply(redisReply *reply) { + char *str; + size_t len, off; + + /* We will always have at least one string with the subscribe/message type */ + if (reply->elements < 1 || reply->element[0]->type != REDIS_REPLY_STRING || + reply->element[0]->len < sizeof("message") - 1) + { + return 0; + } + + /* Get the string/len moving past 'p' if needed */ + off = tolower(reply->element[0]->str[0]) == 'p'; + str = reply->element[0]->str + off; + len = reply->element[0]->len - off; + + return !strncasecmp(str, "subscribe", len) || + !strncasecmp(str, "message", len) || + !strncasecmp(str, "unsubscribe", len); +} + +void redisProcessCallbacks(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + void *reply = NULL; + int status; + + while((status = redisGetReply(c,&reply)) == REDIS_OK) { + if (reply == NULL) { + /* When the connection is being disconnected and there are + * no more replies, this is the cue to really disconnect. */ + if (c->flags & REDIS_DISCONNECTING && hi_sdslen(c->obuf) == 0 + && ac->replies.head == NULL) { + __redisAsyncDisconnect(ac); + return; + } + /* When the connection is not being disconnected, simply stop + * trying to get replies and wait for the next loop tick. */ + break; + } + + /* Keep track of push message support for subscribe handling */ + if (redisIsPushReply(reply)) c->flags |= REDIS_SUPPORTS_PUSH; + + /* Send any non-subscribe related PUSH messages to our PUSH handler + * while allowing subscribe related PUSH messages to pass through. + * This allows existing code to be backward compatible and work in + * either RESP2 or RESP3 mode. */ + if (redisIsSpontaneousPushReply(reply)) { + __redisRunPushCallback(ac, reply); + c->reader->fn->freeObject(reply); + continue; + } + + /* Even if the context is subscribed, pending regular + * callbacks will get a reply before pub/sub messages arrive. */ + redisCallback cb = {NULL, NULL, 0, NULL}; + if (__redisShiftCallback(&ac->replies,&cb) != REDIS_OK) { + /* + * A spontaneous reply in a not-subscribed context can be the error + * reply that is sent when a new connection exceeds the maximum + * number of allowed connections on the server side. + * + * This is seen as an error instead of a regular reply because the + * server closes the connection after sending it. + * + * To prevent the error from being overwritten by an EOF error the + * connection is closed here. See issue #43. + * + * Another possibility is that the server is loading its dataset. + * In this case we also want to close the connection, and have the + * user wait until the server is ready to take our request. 
+ */ + if (((redisReply*)reply)->type == REDIS_REPLY_ERROR) { + c->err = REDIS_ERR_OTHER; + snprintf(c->errstr,sizeof(c->errstr),"%s",((redisReply*)reply)->str); + c->reader->fn->freeObject(reply); + __redisAsyncDisconnect(ac); + return; + } + /* No more regular callbacks and no errors, the context *must* be subscribed. */ + assert(c->flags & REDIS_SUBSCRIBED); + if (c->flags & REDIS_SUBSCRIBED) + __redisGetSubscribeCallback(ac,reply,&cb); + } + + if (cb.fn != NULL) { + __redisRunCallback(ac,&cb,reply); + if (!(c->flags & REDIS_NO_AUTO_FREE_REPLIES)){ + c->reader->fn->freeObject(reply); + } + + /* Proceed with free'ing when redisAsyncFree() was called. */ + if (c->flags & REDIS_FREEING) { + __redisAsyncFree(ac); + return; + } + } else { + /* No callback for this reply. This can either be a NULL callback, + * or there were no callbacks to begin with. Either way, don't + * abort with an error, but simply ignore it because the client + * doesn't know what the server will spit out over the wire. */ + c->reader->fn->freeObject(reply); + } + + /* If in monitor mode, repush the callback */ + if (c->flags & REDIS_MONITORING) { + __redisPushCallback(&ac->replies,&cb); + } + } + + /* Disconnect when there was an error reading the reply */ + if (status != REDIS_OK) + __redisAsyncDisconnect(ac); +} + +static void __redisAsyncHandleConnectFailure(redisAsyncContext *ac) { + if (ac->onConnect) ac->onConnect(ac, REDIS_ERR); + __redisAsyncDisconnect(ac); +} + +/* Internal helper function to detect socket status the first time a read or + * write event fires. When connecting was not successful, the connect callback + * is called with a REDIS_ERR status and the context is free'd. */ +static int __redisAsyncHandleConnect(redisAsyncContext *ac) { + int completed = 0; + redisContext *c = &(ac->c); + + if (redisCheckConnectDone(c, &completed) == REDIS_ERR) { + /* Error! */ + if (redisCheckSocketError(c) == REDIS_ERR) + __redisAsyncCopyError(ac); + __redisAsyncHandleConnectFailure(ac); + return REDIS_ERR; + } else if (completed == 1) { + /* connected! */ + if (c->connection_type == REDIS_CONN_TCP && + redisSetTcpNoDelay(c) == REDIS_ERR) { + __redisAsyncHandleConnectFailure(ac); + return REDIS_ERR; + } + + if (ac->onConnect) ac->onConnect(ac, REDIS_OK); + c->flags |= REDIS_CONNECTED; + return REDIS_OK; + } else { + return REDIS_OK; + } +} + +void redisAsyncRead(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (redisBufferRead(c) == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + /* Always re-schedule reads */ + _EL_ADD_READ(ac); + redisProcessCallbacks(ac); + } +} + +/* This function should be called when the socket is readable. + * It processes all replies that can be read and executes their callbacks. + */ +void redisAsyncHandleRead(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (!(c->flags & REDIS_CONNECTED)) { + /* Abort connect was not successful. */ + if (__redisAsyncHandleConnect(ac) != REDIS_OK) + return; + /* Try again later when the context is still not connected. 
*/ + if (!(c->flags & REDIS_CONNECTED)) + return; + } + + c->funcs->async_read(ac); +} + +void redisAsyncWrite(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + int done = 0; + + if (redisBufferWrite(c,&done) == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + /* Continue writing when not done, stop writing otherwise */ + if (!done) + _EL_ADD_WRITE(ac); + else + _EL_DEL_WRITE(ac); + + /* Always schedule reads after writes */ + _EL_ADD_READ(ac); + } +} + +void redisAsyncHandleWrite(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (!(c->flags & REDIS_CONNECTED)) { + /* Abort connect was not successful. */ + if (__redisAsyncHandleConnect(ac) != REDIS_OK) + return; + /* Try again later when the context is still not connected. */ + if (!(c->flags & REDIS_CONNECTED)) + return; + } + + c->funcs->async_write(ac); +} + +void redisAsyncHandleTimeout(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisCallback cb; + + if ((c->flags & REDIS_CONNECTED)) { + if (ac->replies.head == NULL && ac->sub.replies.head == NULL) { + /* Nothing to do - just an idle timeout */ + return; + } + + if (!ac->c.command_timeout || + (!ac->c.command_timeout->tv_sec && !ac->c.command_timeout->tv_usec)) { + /* A belated connect timeout arriving, ignore */ + return; + } + } + + if (!c->err) { + __redisSetError(c, REDIS_ERR_TIMEOUT, "Timeout"); + __redisAsyncCopyError(ac); + } + + if (!(c->flags & REDIS_CONNECTED) && ac->onConnect) { + ac->onConnect(ac, REDIS_ERR); + } + + while (__redisShiftCallback(&ac->replies, &cb) == REDIS_OK) { + __redisRunCallback(ac, &cb, NULL); + } + + /** + * TODO: Don't automatically sever the connection, + * rather, allow to ignore responses before the queue is clear + */ + __redisAsyncDisconnect(ac); +} + +/* Sets a pointer to the first argument and its length starting at p. Returns + * the number of bytes to skip to get to the following argument. */ +static const char *nextArgument(const char *start, const char **str, size_t *len) { + const char *p = start; + if (p[0] != '$') { + p = strchr(p,'$'); + if (p == NULL) return NULL; + } + + *len = (int)strtol(p+1,NULL,10); + p = strchr(p,'\r'); + assert(p); + *str = p+2; + return p+2+(*len)+2; +} + +/* Helper function for the redisAsyncCommand* family of functions. Writes a + * formatted command to the output buffer and registers the provided callback + * function with the context. */ +static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len) { + redisContext *c = &(ac->c); + redisCallback cb; + struct dict *cbdict; + dictEntry *de; + redisCallback *existcb; + int pvariant, hasnext; + const char *cstr, *astr; + size_t clen, alen; + const char *p; + hisds sname; + int ret; + + /* Don't accept new commands when the connection is about to be closed. */ + if (c->flags & (REDIS_DISCONNECTING | REDIS_FREEING)) return REDIS_ERR; + + /* Setup callback */ + cb.fn = fn; + cb.privdata = privdata; + cb.pending_subs = 1; + + /* Find out which command will be appended. */ + p = nextArgument(cmd,&cstr,&clen); + assert(p != NULL); + hasnext = (p[0] == '$'); + pvariant = (tolower(cstr[0]) == 'p') ? 1 : 0; + cstr += pvariant; + clen -= pvariant; + + if (hasnext && strncasecmp(cstr,"subscribe\r\n",11) == 0) { + c->flags |= REDIS_SUBSCRIBED; + + /* Add every channel/pattern to the list of subscription callbacks. 
*/ + while ((p = nextArgument(p,&astr,&alen)) != NULL) { + sname = hi_sdsnewlen(astr,alen); + if (sname == NULL) + goto oom; + + if (pvariant) + cbdict = ac->sub.patterns; + else + cbdict = ac->sub.channels; + + de = dictFind(cbdict,sname); + + if (de != NULL) { + existcb = dictGetEntryVal(de); + cb.pending_subs = existcb->pending_subs + 1; + } + + ret = dictReplace(cbdict,sname,&cb); + + if (ret == 0) hi_sdsfree(sname); + } + } else if (strncasecmp(cstr,"unsubscribe\r\n",13) == 0) { + /* It is only useful to call (P)UNSUBSCRIBE when the context is + * subscribed to one or more channels or patterns. */ + if (!(c->flags & REDIS_SUBSCRIBED)) return REDIS_ERR; + + /* (P)UNSUBSCRIBE does not have its own response: every channel or + * pattern that is unsubscribed will receive a message. This means we + * should not append a callback function for this command. */ + } else if (strncasecmp(cstr,"monitor\r\n",9) == 0) { + /* Set monitor flag and push callback */ + c->flags |= REDIS_MONITORING; + if (__redisPushCallback(&ac->replies,&cb) != REDIS_OK) + goto oom; + } else { + if (c->flags & REDIS_SUBSCRIBED) { + if (__redisPushCallback(&ac->sub.replies,&cb) != REDIS_OK) + goto oom; + } else { + if (__redisPushCallback(&ac->replies,&cb) != REDIS_OK) + goto oom; + } + } + + __redisAppendCommand(c,cmd,len); + + /* Always schedule a write when the write buffer is non-empty */ + _EL_ADD_WRITE(ac); + + return REDIS_OK; +oom: + __redisSetError(&(ac->c), REDIS_ERR_OOM, "Out of memory"); + __redisAsyncCopyError(ac); + return REDIS_ERR; +} + +int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap) { + char *cmd; + int len; + int status; + len = redisvFormatCommand(&cmd,format,ap); + + /* We don't want to pass -1 or -2 to future functions as a length. */ + if (len < 0) + return REDIS_ERR; + + status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + hi_free(cmd); + return status; +} + +int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...) 
{ + va_list ap; + int status; + va_start(ap,format); + status = redisvAsyncCommand(ac,fn,privdata,format,ap); + va_end(ap); + return status; +} + +int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen) { + hisds cmd; + long long len; + int status; + len = redisFormatSdsCommandArgv(&cmd,argc,argv,argvlen); + if (len < 0) + return REDIS_ERR; + status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + hi_sdsfree(cmd); + return status; +} + +int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len) { + int status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + return status; +} + +redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn) { + redisAsyncPushFn *old = ac->push_cb; + ac->push_cb = fn; + return old; +} + +int redisAsyncSetTimeout(redisAsyncContext *ac, struct timeval tv) { + if (!ac->c.command_timeout) { + ac->c.command_timeout = hi_calloc(1, sizeof(tv)); + if (ac->c.command_timeout == NULL) { + __redisSetError(&ac->c, REDIS_ERR_OOM, "Out of memory"); + __redisAsyncCopyError(ac); + return REDIS_ERR; + } + } + + if (tv.tv_sec != ac->c.command_timeout->tv_sec || + tv.tv_usec != ac->c.command_timeout->tv_usec) + { + *ac->c.command_timeout = tv; + } + + return REDIS_OK; +} diff --git a/deps/hiredis/async.h b/deps/hiredis/async.h new file mode 100644 index 0000000..4c65203 --- /dev/null +++ b/deps/hiredis/async.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
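redisAsyncCommand above formats the command, appends a callback to ac->replies and schedules a write; redisProcessCallbacks later matches each reply to the oldest pending callback, runs it, and (unless REDIS_NO_AUTO_FREE_REPLIES is set) frees the reply as soon as the callback returns, so replies must not be retained. A fragment building on the libevent attach sketch earlier (key/value strings and function names are placeholders; assumes ac is already attached to a loop):

    #include <stdio.h>
    #include <hiredis/async.h>

    /* Reply callback: the reply object is owned by hiredis and is freed
     * right after this function returns. */
    static void getCallback(redisAsyncContext *ac, void *r, void *privdata) {
        redisReply *reply = r;
        if (reply == NULL) return;                /* error or disconnect */
        printf("GET %s -> %s\n", (const char *)privdata, reply->str);
        redisAsyncDisconnect(ac);                 /* flush pending, then close */
    }

    /* Called right after attaching; both commands are buffered and pipelined,
     * and the event loop sends them once the socket becomes writable. */
    static void issueCommands(redisAsyncContext *ac) {
        redisAsyncCommand(ac, NULL, NULL, "SET %s %s", "key", "value");
        redisAsyncCommand(ac, getCallback, (void *)"key", "GET %s", "key");
    }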
+ */ + +#ifndef __HIREDIS_ASYNC_H +#define __HIREDIS_ASYNC_H +#include "hiredis.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct redisAsyncContext; /* need forward declaration of redisAsyncContext */ +struct dict; /* dictionary header is included in async.c */ + +/* Reply callback prototype and container */ +typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*); +typedef struct redisCallback { + struct redisCallback *next; /* simple singly linked list */ + redisCallbackFn *fn; + int pending_subs; + void *privdata; +} redisCallback; + +/* List of callbacks for either regular replies or pub/sub */ +typedef struct redisCallbackList { + redisCallback *head, *tail; +} redisCallbackList; + +/* Connection callback prototypes */ +typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status); +typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status); +typedef void(redisTimerCallback)(void *timer, void *privdata); + +/* Context for an async connection to Redis */ +typedef struct redisAsyncContext { + /* Hold the regular context, so it can be realloc'ed. */ + redisContext c; + + /* Setup error flags so they can be used directly. */ + int err; + char *errstr; + + /* Not used by hiredis */ + void *data; + void (*dataCleanup)(void *privdata); + + /* Event library data and hooks */ + struct { + void *data; + + /* Hooks that are called when the library expects to start + * reading/writing. These functions should be idempotent. */ + void (*addRead)(void *privdata); + void (*delRead)(void *privdata); + void (*addWrite)(void *privdata); + void (*delWrite)(void *privdata); + void (*cleanup)(void *privdata); + void (*scheduleTimer)(void *privdata, struct timeval tv); + } ev; + + /* Called when either the connection is terminated due to an error or per + * user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */ + redisDisconnectCallback *onDisconnect; + + /* Called when the first write event was received. 
*/ + redisConnectCallback *onConnect; + + /* Regular command callbacks */ + redisCallbackList replies; + + /* Address used for connect() */ + struct sockaddr *saddr; + size_t addrlen; + + /* Subscription callbacks */ + struct { + redisCallbackList replies; + struct dict *channels; + struct dict *patterns; + } sub; + + /* Any configured RESP3 PUSH handler */ + redisAsyncPushFn *push_cb; +} redisAsyncContext; + +/* Functions that proxy to hiredis */ +redisAsyncContext *redisAsyncConnectWithOptions(const redisOptions *options); +redisAsyncContext *redisAsyncConnect(const char *ip, int port); +redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr); +redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, + const char *source_addr); +redisAsyncContext *redisAsyncConnectUnix(const char *path); +int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn); +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); + +redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn); +int redisAsyncSetTimeout(redisAsyncContext *ac, struct timeval tv); +void redisAsyncDisconnect(redisAsyncContext *ac); +void redisAsyncFree(redisAsyncContext *ac); + +/* Handle read/write events */ +void redisAsyncHandleRead(redisAsyncContext *ac); +void redisAsyncHandleWrite(redisAsyncContext *ac); +void redisAsyncHandleTimeout(redisAsyncContext *ac); +void redisAsyncRead(redisAsyncContext *ac); +void redisAsyncWrite(redisAsyncContext *ac); + +/* Command functions for an async context. Write the command to the + * output buffer and register the provided callback. */ +int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap); +int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...); +int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen); +int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/deps/hiredis/async_private.h b/deps/hiredis/async_private.h new file mode 100644 index 0000000..ea0558d --- /dev/null +++ b/deps/hiredis/async_private.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
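In a subscribed context the callback passed to SUBSCRIBE/PSUBSCRIBE is not consumed after one reply: __redisGetSubscribeCallback in async.c keeps it keyed by channel in ac->sub.channels (or ac->sub.patterns) and replays it for every pushed message until the matching unsubscribe confirmation arrives. A sketch of a RESP2-style handler (channel name and function names are placeholders; assumes an already-attached context as in the earlier sketches):

    #include <stdio.h>
    #include <string.h>
    #include <hiredis/async.h>

    static void onMessage(redisAsyncContext *ac, void *r, void *privdata) {
        redisReply *reply = r;
        (void)ac; (void)privdata;
        if (reply == NULL) return;               /* connection dropped */
        /* RESP2 pushes arrive as ["message", channel, payload]; the initial
         * ["subscribe", channel, count] confirmation is delivered here too. */
        if (reply->type == REDIS_REPLY_ARRAY && reply->elements == 3 &&
            strcmp(reply->element[0]->str, "message") == 0) {
            printf("%s: %s\n", reply->element[1]->str, reply->element[2]->str);
        }
    }

    static void subscribeUpdates(redisAsyncContext *ac) {
        redisAsyncCommand(ac, onMessage, NULL, "SUBSCRIBE %s", "updates");
        /* Later, this removes the stored callback once the confirmation arrives:
         * redisAsyncCommand(ac, NULL, NULL, "UNSUBSCRIBE %s", "updates"); */
    }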
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_ASYNC_PRIVATE_H +#define __HIREDIS_ASYNC_PRIVATE_H + +#define _EL_ADD_READ(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addRead) (ctx)->ev.addRead((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_READ(ctx) do { \ + if ((ctx)->ev.delRead) (ctx)->ev.delRead((ctx)->ev.data); \ + } while(0) +#define _EL_ADD_WRITE(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addWrite) (ctx)->ev.addWrite((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_WRITE(ctx) do { \ + if ((ctx)->ev.delWrite) (ctx)->ev.delWrite((ctx)->ev.data); \ + } while(0) +#define _EL_CLEANUP(ctx) do { \ + if ((ctx)->ev.cleanup) (ctx)->ev.cleanup((ctx)->ev.data); \ + ctx->ev.cleanup = NULL; \ + } while(0) + +static inline void refreshTimeout(redisAsyncContext *ctx) { + #define REDIS_TIMER_ISSET(tvp) \ + (tvp && ((tvp)->tv_sec || (tvp)->tv_usec)) + + #define REDIS_EL_TIMER(ac, tvp) \ + if ((ac)->ev.scheduleTimer && REDIS_TIMER_ISSET(tvp)) { \ + (ac)->ev.scheduleTimer((ac)->ev.data, *(tvp)); \ + } + + if (ctx->c.flags & REDIS_CONNECTED) { + REDIS_EL_TIMER(ctx, ctx->c.command_timeout); + } else { + REDIS_EL_TIMER(ctx, ctx->c.connect_timeout); + } +} + +void __redisAsyncDisconnect(redisAsyncContext *ac); +void redisProcessCallbacks(redisAsyncContext *ac); + +#endif /* __HIREDIS_ASYNC_PRIVATE_H */ diff --git a/deps/hiredis/dict.c b/deps/hiredis/dict.c new file mode 100644 index 0000000..ad57181 --- /dev/null +++ b/deps/hiredis/dict.c @@ -0,0 +1,343 @@ +/* Hash table implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
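The _EL_ADD_READ/_EL_ADD_WRITE macros above call refreshTimeout first, so every time hiredis asks the event loop for attention it also re-arms the adapter's scheduleTimer hook: with connect_timeout while the connection is still being established, and with command_timeout once connected. On expiry the adapter invokes redisAsyncHandleTimeout, which fails pending callbacks with a NULL reply and disconnects. From application code this only requires setting a timeout, roughly (a fragment, assuming an existing async context ac):

    struct timeval tv = { .tv_sec = 0, .tv_usec = 500000 };  /* 500 ms */
    redisAsyncSetTimeout(ac, tv);   /* copied into ac->c.command_timeout; a late
                                       reply now ends in redisAsyncHandleTimeout */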
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include "alloc.h" +#include +#include +#include +#include "dict.h" + +/* -------------------------- private prototypes ---------------------------- */ + +static int _dictExpandIfNeeded(dict *ht); +static unsigned long _dictNextPower(unsigned long size); +static int _dictKeyIndex(dict *ht, const void *key); +static int _dictInit(dict *ht, dictType *type, void *privDataPtr); + +/* -------------------------- hash functions -------------------------------- */ + +/* Generic hash function (a popular one from Bernstein). + * I tested a few and this was the best. */ +static unsigned int dictGenHashFunction(const unsigned char *buf, int len) { + unsigned int hash = 5381; + + while (len--) + hash = ((hash << 5) + hash) + (*buf++); /* hash * 33 + c */ + return hash; +} + +/* ----------------------------- API implementation ------------------------- */ + +/* Reset an hashtable already initialized with ht_init(). + * NOTE: This function should only called by ht_destroy(). */ +static void _dictReset(dict *ht) { + ht->table = NULL; + ht->size = 0; + ht->sizemask = 0; + ht->used = 0; +} + +/* Create a new hash table */ +static dict *dictCreate(dictType *type, void *privDataPtr) { + dict *ht = hi_malloc(sizeof(*ht)); + if (ht == NULL) + return NULL; + + _dictInit(ht,type,privDataPtr); + return ht; +} + +/* Initialize the hash table */ +static int _dictInit(dict *ht, dictType *type, void *privDataPtr) { + _dictReset(ht); + ht->type = type; + ht->privdata = privDataPtr; + return DICT_OK; +} + +/* Expand or create the hashtable */ +static int dictExpand(dict *ht, unsigned long size) { + dict n; /* the new hashtable */ + unsigned long realsize = _dictNextPower(size), i; + + /* the size is invalid if it is smaller than the number of + * elements already inside the hashtable */ + if (ht->used > size) + return DICT_ERR; + + _dictInit(&n, ht->type, ht->privdata); + n.size = realsize; + n.sizemask = realsize-1; + n.table = hi_calloc(realsize,sizeof(dictEntry*)); + if (n.table == NULL) + return DICT_ERR; + + /* Copy all the elements from the old to the new table: + * note that if the old hash table is empty ht->size is zero, + * so dictExpand just creates an hash table. */ + n.used = ht->used; + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if (ht->table[i] == NULL) continue; + + /* For each hash entry on this slot... */ + he = ht->table[i]; + while(he) { + unsigned int h; + + nextHe = he->next; + /* Get the new element index */ + h = dictHashKey(ht, he->key) & n.sizemask; + he->next = n.table[h]; + n.table[h] = he; + ht->used--; + /* Pass to the next element */ + he = nextHe; + } + } + assert(ht->used == 0); + hi_free(ht->table); + + /* Remap the new hashtable in the old */ + *ht = n; + return DICT_OK; +} + +/* Add an element to the target hash table */ +static int dictAdd(dict *ht, void *key, void *val) { + int index; + dictEntry *entry; + + /* Get the index of the new element, or -1 if + * the element already exists. 
*/ + if ((index = _dictKeyIndex(ht, key)) == -1) + return DICT_ERR; + + /* Allocates the memory and stores key */ + entry = hi_malloc(sizeof(*entry)); + if (entry == NULL) + return DICT_ERR; + + entry->next = ht->table[index]; + ht->table[index] = entry; + + /* Set the hash entry fields. */ + dictSetHashKey(ht, entry, key); + dictSetHashVal(ht, entry, val); + ht->used++; + return DICT_OK; +} + +/* Add an element, discarding the old if the key already exists. + * Return 1 if the key was added from scratch, 0 if there was already an + * element with such key and dictReplace() just performed a value update + * operation. */ +static int dictReplace(dict *ht, void *key, void *val) { + dictEntry *entry, auxentry; + + /* Try to add the element. If the key + * does not exists dictAdd will succeed. */ + if (dictAdd(ht, key, val) == DICT_OK) + return 1; + /* It already exists, get the entry */ + entry = dictFind(ht, key); + if (entry == NULL) + return 0; + + /* Free the old value and set the new one */ + /* Set the new value and free the old one. Note that it is important + * to do that in this order, as the value may just be exactly the same + * as the previous one. In this context, think to reference counting, + * you want to increment (set), and then decrement (free), and not the + * reverse. */ + auxentry = *entry; + dictSetHashVal(ht, entry, val); + dictFreeEntryVal(ht, &auxentry); + return 0; +} + +/* Search and remove an element */ +static int dictDelete(dict *ht, const void *key) { + unsigned int h; + dictEntry *de, *prevde; + + if (ht->size == 0) + return DICT_ERR; + h = dictHashKey(ht, key) & ht->sizemask; + de = ht->table[h]; + + prevde = NULL; + while(de) { + if (dictCompareHashKeys(ht,key,de->key)) { + /* Unlink the element from the list */ + if (prevde) + prevde->next = de->next; + else + ht->table[h] = de->next; + + dictFreeEntryKey(ht,de); + dictFreeEntryVal(ht,de); + hi_free(de); + ht->used--; + return DICT_OK; + } + prevde = de; + de = de->next; + } + return DICT_ERR; /* not found */ +} + +/* Destroy an entire hash table */ +static int _dictClear(dict *ht) { + unsigned long i; + + /* Free all the elements */ + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if ((he = ht->table[i]) == NULL) continue; + while(he) { + nextHe = he->next; + dictFreeEntryKey(ht, he); + dictFreeEntryVal(ht, he); + hi_free(he); + ht->used--; + he = nextHe; + } + } + /* Free the table and the allocated cache structure */ + hi_free(ht->table); + /* Re-initialize the table */ + _dictReset(ht); + return DICT_OK; /* never fails */ +} + +/* Clear & Release the hash table */ +static void dictRelease(dict *ht) { + _dictClear(ht); + hi_free(ht); +} + +static dictEntry *dictFind(dict *ht, const void *key) { + dictEntry *he; + unsigned int h; + + if (ht->size == 0) return NULL; + h = dictHashKey(ht, key) & ht->sizemask; + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return he; + he = he->next; + } + return NULL; +} + +static void dictInitIterator(dictIterator *iter, dict *ht) { + iter->ht = ht; + iter->index = -1; + iter->entry = NULL; + iter->nextEntry = NULL; +} + +static dictEntry *dictNext(dictIterator *iter) { + while (1) { + if (iter->entry == NULL) { + iter->index++; + if (iter->index >= + (signed)iter->ht->size) break; + iter->entry = iter->ht->table[iter->index]; + } else { + iter->entry = iter->nextEntry; + } + if (iter->entry) { + /* We need to save the 'next' here, the iterator user + * may delete the entry we are returning. 
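+             * This is what makes it safe to delete the entry returned by
+             * dictNext() (e.g. via dictDelete()) while iterating.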
*/ + iter->nextEntry = iter->entry->next; + return iter->entry; + } + } + return NULL; +} + +/* ------------------------- private functions ------------------------------ */ + +/* Expand the hash table if needed */ +static int _dictExpandIfNeeded(dict *ht) { + /* If the hash table is empty expand it to the initial size, + * if the table is "full" double its size. */ + if (ht->size == 0) + return dictExpand(ht, DICT_HT_INITIAL_SIZE); + if (ht->used == ht->size) + return dictExpand(ht, ht->size*2); + return DICT_OK; +} + +/* Our hash table capability is a power of two */ +static unsigned long _dictNextPower(unsigned long size) { + unsigned long i = DICT_HT_INITIAL_SIZE; + + if (size >= LONG_MAX) return LONG_MAX; + while(1) { + if (i >= size) + return i; + i *= 2; + } +} + +/* Returns the index of a free slot that can be populated with + * an hash entry for the given 'key'. + * If the key already exists, -1 is returned. */ +static int _dictKeyIndex(dict *ht, const void *key) { + unsigned int h; + dictEntry *he; + + /* Expand the hashtable if needed */ + if (_dictExpandIfNeeded(ht) == DICT_ERR) + return -1; + /* Compute the key hash value */ + h = dictHashKey(ht, key) & ht->sizemask; + /* Search if this slot does not already contain the given key */ + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return -1; + he = he->next; + } + return h; +} + diff --git a/deps/hiredis/dict.h b/deps/hiredis/dict.h new file mode 100644 index 0000000..6ad0acd --- /dev/null +++ b/deps/hiredis/dict.h @@ -0,0 +1,125 @@ +/* Hash table implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __DICT_H +#define __DICT_H + +#define DICT_OK 0 +#define DICT_ERR 1 + +/* Unused arguments generate annoying warnings... */ +#define DICT_NOTUSED(V) ((void) V) + +typedef struct dictEntry { + void *key; + void *val; + struct dictEntry *next; +} dictEntry; + +typedef struct dictType { + unsigned int (*hashFunction)(const void *key); + void *(*keyDup)(void *privdata, const void *key); + void *(*valDup)(void *privdata, const void *obj); + int (*keyCompare)(void *privdata, const void *key1, const void *key2); + void (*keyDestructor)(void *privdata, void *key); + void (*valDestructor)(void *privdata, void *obj); +} dictType; + +typedef struct dict { + dictEntry **table; + dictType *type; + unsigned long size; + unsigned long sizemask; + unsigned long used; + void *privdata; +} dict; + +typedef struct dictIterator { + dict *ht; + int index; + dictEntry *entry, *nextEntry; +} dictIterator; + +/* This is the initial size of every hash table */ +#define DICT_HT_INITIAL_SIZE 4 + +/* ------------------------------- Macros ------------------------------------*/ +#define dictFreeEntryVal(ht, entry) \ + if ((ht)->type->valDestructor) \ + (ht)->type->valDestructor((ht)->privdata, (entry)->val) + +#define dictSetHashVal(ht, entry, _val_) do { \ + if ((ht)->type->valDup) \ + entry->val = (ht)->type->valDup((ht)->privdata, _val_); \ + else \ + entry->val = (_val_); \ +} while(0) + +#define dictFreeEntryKey(ht, entry) \ + if ((ht)->type->keyDestructor) \ + (ht)->type->keyDestructor((ht)->privdata, (entry)->key) + +#define dictSetHashKey(ht, entry, _key_) do { \ + if ((ht)->type->keyDup) \ + entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \ + else \ + entry->key = (_key_); \ +} while(0) + +#define dictCompareHashKeys(ht, key1, key2) \ + (((ht)->type->keyCompare) ? 
\ + (ht)->type->keyCompare((ht)->privdata, key1, key2) : \ + (key1) == (key2)) + +#define dictHashKey(ht, key) (ht)->type->hashFunction(key) + +#define dictGetEntryKey(he) ((he)->key) +#define dictGetEntryVal(he) ((he)->val) +#define dictSlots(ht) ((ht)->size) +#define dictSize(ht) ((ht)->used) + +/* API */ +static unsigned int dictGenHashFunction(const unsigned char *buf, int len); +static dict *dictCreate(dictType *type, void *privDataPtr); +static int dictExpand(dict *ht, unsigned long size); +static int dictAdd(dict *ht, void *key, void *val); +static int dictReplace(dict *ht, void *key, void *val); +static int dictDelete(dict *ht, const void *key); +static void dictRelease(dict *ht); +static dictEntry * dictFind(dict *ht, const void *key); +static void dictInitIterator(dictIterator *iter, dict *ht); +static dictEntry *dictNext(dictIterator *iter); + +#endif /* __DICT_H */ diff --git a/deps/hiredis/examples/CMakeLists.txt b/deps/hiredis/examples/CMakeLists.txt new file mode 100644 index 0000000..49cd8d4 --- /dev/null +++ b/deps/hiredis/examples/CMakeLists.txt @@ -0,0 +1,49 @@ +INCLUDE(FindPkgConfig) +# Check for GLib + +PKG_CHECK_MODULES(GLIB2 glib-2.0) +if (GLIB2_FOUND) + INCLUDE_DIRECTORIES(${GLIB2_INCLUDE_DIRS}) + LINK_DIRECTORIES(${GLIB2_LIBRARY_DIRS}) + ADD_EXECUTABLE(example-glib example-glib.c) + TARGET_LINK_LIBRARIES(example-glib hiredis ${GLIB2_LIBRARIES}) +ENDIF(GLIB2_FOUND) + +FIND_PATH(LIBEV ev.h + HINTS /usr/local /usr/opt/local + ENV LIBEV_INCLUDE_DIR) + +if (LIBEV) + # Just compile and link with libev + ADD_EXECUTABLE(example-libev example-libev.c) + TARGET_LINK_LIBRARIES(example-libev hiredis ev) +ENDIF() + +FIND_PATH(LIBEVENT event.h) +if (LIBEVENT) + ADD_EXECUTABLE(example-libevent example-libevent.c) + TARGET_LINK_LIBRARIES(example-libevent hiredis event) +ENDIF() + +FIND_PATH(LIBUV uv.h) +IF (LIBUV) + ADD_EXECUTABLE(example-libuv example-libuv.c) + TARGET_LINK_LIBRARIES(example-libuv hiredis uv) +ENDIF() + +IF (APPLE) + FIND_LIBRARY(CF CoreFoundation) + ADD_EXECUTABLE(example-macosx example-macosx.c) + TARGET_LINK_LIBRARIES(example-macosx hiredis ${CF}) +ENDIF() + +IF (ENABLE_SSL) + ADD_EXECUTABLE(example-ssl example-ssl.c) + TARGET_LINK_LIBRARIES(example-ssl hiredis hiredis_ssl) +ENDIF() + +ADD_EXECUTABLE(example example.c) +TARGET_LINK_LIBRARIES(example hiredis) + +ADD_EXECUTABLE(example-push example-push.c) +TARGET_LINK_LIBRARIES(example-push hiredis) diff --git a/deps/hiredis/examples/example-ae.c b/deps/hiredis/examples/example-ae.c new file mode 100644 index 0000000..8efa730 --- /dev/null +++ b/deps/hiredis/examples/example-ae.c @@ -0,0 +1,62 @@ +#include +#include +#include +#include + +#include +#include +#include + +/* Put event loop in the global scope, so it can be explicitly stopped */ +static aeEventLoop *loop; + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + aeStop(loop); + return; + } + + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + aeStop(loop); + return; + } + + printf("Disconnected...\n"); + aeStop(loop); +} + +int main (int argc, char **argv) { + signal(SIGPIPE, SIG_IGN); + + 
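+    /* The async API is non-blocking: redisAsyncConnect() only prepares the
+     * context; the actual connection is completed from the event loop and
+     * reported through connectCallback(). */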
redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + loop = aeCreateEventLoop(64); + redisAeAttach(loop, c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + aeMain(loop); + return 0; +} + diff --git a/deps/hiredis/examples/example-glib.c b/deps/hiredis/examples/example-glib.c new file mode 100644 index 0000000..d6e10f8 --- /dev/null +++ b/deps/hiredis/examples/example-glib.c @@ -0,0 +1,73 @@ +#include + +#include +#include +#include + +static GMainLoop *mainloop; + +static void +connect_cb (const redisAsyncContext *ac G_GNUC_UNUSED, + int status) +{ + if (status != REDIS_OK) { + g_printerr("Failed to connect: %s\n", ac->errstr); + g_main_loop_quit(mainloop); + } else { + g_printerr("Connected...\n"); + } +} + +static void +disconnect_cb (const redisAsyncContext *ac G_GNUC_UNUSED, + int status) +{ + if (status != REDIS_OK) { + g_error("Failed to disconnect: %s", ac->errstr); + } else { + g_printerr("Disconnected...\n"); + g_main_loop_quit(mainloop); + } +} + +static void +command_cb(redisAsyncContext *ac, + gpointer r, + gpointer user_data G_GNUC_UNUSED) +{ + redisReply *reply = r; + + if (reply) { + g_print("REPLY: %s\n", reply->str); + } + + redisAsyncDisconnect(ac); +} + +gint +main (gint argc G_GNUC_UNUSED, + gchar *argv[] G_GNUC_UNUSED) +{ + redisAsyncContext *ac; + GMainContext *context = NULL; + GSource *source; + + ac = redisAsyncConnect("127.0.0.1", 6379); + if (ac->err) { + g_printerr("%s\n", ac->errstr); + exit(EXIT_FAILURE); + } + + source = redis_source_new(ac); + mainloop = g_main_loop_new(context, FALSE); + g_source_attach(source, context); + + redisAsyncSetConnectCallback(ac, connect_cb); + redisAsyncSetDisconnectCallback(ac, disconnect_cb); + redisAsyncCommand(ac, command_cb, NULL, "SET key 1234"); + redisAsyncCommand(ac, command_cb, NULL, "GET key"); + + g_main_loop_run(mainloop); + + return EXIT_SUCCESS; +} diff --git a/deps/hiredis/examples/example-ivykis.c b/deps/hiredis/examples/example-ivykis.c new file mode 100644 index 0000000..f57dc38 --- /dev/null +++ b/deps/hiredis/examples/example-ivykis.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + iv_init(); + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... 
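+         * (a real application would call redisAsyncFree(c) before returning)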
*/ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisIvykisAttach(c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + + iv_main(); + + iv_deinit(); + + return 0; +} diff --git a/deps/hiredis/examples/example-libev.c b/deps/hiredis/examples/example-libev.c new file mode 100644 index 0000000..ec47430 --- /dev/null +++ b/deps/hiredis/examples/example-libev.c @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibevAttach(EV_DEFAULT_ c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + ev_loop(EV_DEFAULT_ 0); + return 0; +} diff --git a/deps/hiredis/examples/example-libevent-ssl.c b/deps/hiredis/examples/example-libevent-ssl.c new file mode 100644 index 0000000..7d99af1 --- /dev/null +++ b/deps/hiredis/examples/example-libevent-ssl.c @@ -0,0 +1,90 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + struct event_base *base = event_base_new(); + if (argc < 5) { + fprintf(stderr, + "Usage: %s [ca]\n", argv[0]); + exit(1); + } + + const char *value = argv[1]; + size_t nvalue = strlen(value); + + const char *hostname = argv[2]; + int port = atoi(argv[3]); + + const char *cert = argv[4]; + const char *certKey = argv[5]; + const char *caCert = argc > 5 ? 
argv[6] : NULL; + + redisSSLContext *ssl; + redisSSLContextError ssl_error; + + redisInitOpenSSL(); + + ssl = redisCreateSSLContext(caCert, NULL, + cert, certKey, NULL, &ssl_error); + if (!ssl) { + printf("Error: %s\n", redisSSLContextGetError(ssl_error)); + return 1; + } + + redisAsyncContext *c = redisAsyncConnect(hostname, port); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + if (redisInitiateSSLWithContext(&c->c, ssl) != REDIS_OK) { + printf("SSL Error!\n"); + exit(1); + } + + redisLibeventAttach(c,base); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", value, nvalue); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + event_base_dispatch(base); + + redisFreeSSLContext(ssl); + return 0; +} diff --git a/deps/hiredis/examples/example-libevent.c b/deps/hiredis/examples/example-libevent.c new file mode 100644 index 0000000..49bddd0 --- /dev/null +++ b/deps/hiredis/examples/example-libevent.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) { + if (c->errstr) { + printf("errstr: %s\n", c->errstr); + } + return; + } + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + struct event_base *base = event_base_new(); + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + struct timeval tv = {0}; + tv.tv_sec = 1; + options.connect_timeout = &tv; + + + redisAsyncContext *c = redisAsyncConnectWithOptions(&options); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibeventAttach(c,base); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + event_base_dispatch(base); + return 0; +} diff --git a/deps/hiredis/examples/example-libuv.c b/deps/hiredis/examples/example-libuv.c new file mode 100644 index 0000000..53fd04a --- /dev/null +++ b/deps/hiredis/examples/example-libuv.c @@ -0,0 +1,81 @@ +#include +#include +#include +#include + +#include +#include +#include + +void debugCallback(redisAsyncContext *c, void *r, void *privdata) { + (void)privdata; //unused + redisReply *reply = r; + if (reply == NULL) { + /* The DEBUG SLEEP command will almost always fail, because we have set a 1 second timeout */ + printf("`DEBUG SLEEP` error: %s\n", c->errstr ? c->errstr : "unknown error"); + return; + } + /* Disconnect after receiving the reply of DEBUG SLEEP (which will not)*/ + redisAsyncDisconnect(c); +} + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) { + printf("`GET key` error: %s\n", c->errstr ? 
c->errstr : "unknown error"); + return; + } + printf("`GET key` result: argv[%s]: %s\n", (char*)privdata, reply->str); + + /* start another request that demonstrate timeout */ + redisAsyncCommand(c, debugCallback, NULL, "DEBUG SLEEP %f", 1.5); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("connect error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("disconnect because of error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + uv_loop_t* loop = uv_default_loop(); + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibuvAttach(c,loop); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncSetTimeout(c, (struct timeval){ .tv_sec = 1, .tv_usec = 0}); + + /* + In this demo, we first `set key`, then `get key` to demonstrate the basic usage of libuv adapter. + Then in `getCallback`, we start a `debug sleep` command to create 1.5 second long request. + Because we have set a 1 second timeout to the connection, the command will always fail with a + timeout error, which is shown in the `debugCallback`. + */ + + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + + uv_run(loop, UV_RUN_DEFAULT); + return 0; +} diff --git a/deps/hiredis/examples/example-macosx.c b/deps/hiredis/examples/example-macosx.c new file mode 100644 index 0000000..bc84ed5 --- /dev/null +++ b/deps/hiredis/examples/example-macosx.c @@ -0,0 +1,66 @@ +// +// Created by Дмитрий Бахвалов on 13.07.15. +// Copyright (c) 2015 Dmitry Bakhvalov. All rights reserved. +// + +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + CFRunLoopStop(CFRunLoopGetCurrent()); + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { + signal(SIGPIPE, SIG_IGN); + + CFRunLoopRef loop = CFRunLoopGetCurrent(); + if( !loop ) { + printf("Error: Cannot get current run loop\n"); + return 1; + } + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... 
*/ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisMacOSAttach(c, loop); + + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + + CFRunLoopRun(); + + return 0; +} + diff --git a/deps/hiredis/examples/example-push.c b/deps/hiredis/examples/example-push.c new file mode 100644 index 0000000..6bc1205 --- /dev/null +++ b/deps/hiredis/examples/example-push.c @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include + +#define KEY_COUNT 5 + +#define panicAbort(fmt, ...) \ + do { \ + fprintf(stderr, "%s:%d:%s(): " fmt, __FILE__, __LINE__, __func__, __VA_ARGS__); \ + exit(-1); \ + } while (0) + +static void assertReplyAndFree(redisContext *context, redisReply *reply, int type) { + if (reply == NULL) + panicAbort("NULL reply from server (error: %s)", context->errstr); + + if (reply->type != type) { + if (reply->type == REDIS_REPLY_ERROR) + fprintf(stderr, "Redis Error: %s\n", reply->str); + + panicAbort("Expected reply type %d but got type %d", type, reply->type); + } + + freeReplyObject(reply); +} + +/* Switch to the RESP3 protocol and enable client tracking */ +static void enableClientTracking(redisContext *c) { + redisReply *reply = redisCommand(c, "HELLO 3"); + if (reply == NULL || c->err) { + panicAbort("NULL reply or server error (error: %s)", c->errstr); + } + + if (reply->type != REDIS_REPLY_MAP) { + fprintf(stderr, "Error: Can't send HELLO 3 command. Are you sure you're "); + fprintf(stderr, "connected to redis-server >= 6.0.0?\nRedis error: %s\n", + reply->type == REDIS_REPLY_ERROR ? 
reply->str : "(unknown)"); + exit(-1); + } + + freeReplyObject(reply); + + /* Enable client tracking */ + reply = redisCommand(c, "CLIENT TRACKING ON"); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); +} + +void pushReplyHandler(void *privdata, void *r) { + redisReply *reply = r; + int *invalidations = privdata; + + /* Sanity check on the invalidation reply */ + if (reply->type != REDIS_REPLY_PUSH || reply->elements != 2 || + reply->element[1]->type != REDIS_REPLY_ARRAY || + reply->element[1]->element[0]->type != REDIS_REPLY_STRING) + { + panicAbort("%s", "Can't parse PUSH message!"); + } + + /* Increment our invalidation count */ + *invalidations += 1; + + printf("pushReplyHandler(): INVALIDATE '%s' (invalidation count: %d)\n", + reply->element[1]->element[0]->str, *invalidations); + + freeReplyObject(reply); +} + +/* We aren't actually freeing anything here, but it is included to show that we can + * have hiredis call our data destructor when freeing the context */ +void privdata_dtor(void *privdata) { + unsigned int *icount = privdata; + printf("privdata_dtor(): In context privdata dtor (invalidations: %u)\n", *icount); +} + +int main(int argc, char **argv) { + unsigned int j, invalidations = 0; + redisContext *c; + redisReply *reply; + + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + int port = (argc > 2) ? atoi(argv[2]) : 6379; + + redisOptions o = {0}; + REDIS_OPTIONS_SET_TCP(&o, hostname, port); + + /* Set our context privdata to the address of our invalidation counter. Each + * time our PUSH handler is called, hiredis will pass the privdata for context. + * + * This could also be done after we create the context like so: + * + * c->privdata = &invalidations; + * c->free_privdata = privdata_dtor; + */ + REDIS_OPTIONS_SET_PRIVDATA(&o, &invalidations, privdata_dtor); + + /* Set our custom PUSH message handler */ + o.push_cb = pushReplyHandler; + + c = redisConnectWithOptions(&o); + if (c == NULL || c->err) + panicAbort("Connection error: %s", c ? c->errstr : "OOM"); + + /* Enable RESP3 and turn on client tracking */ + enableClientTracking(c); + + /* Set some keys and then read them back. 
Once we do that, Redis will deliver + * invalidation push messages whenever the key is modified */ + for (j = 0; j < KEY_COUNT; j++) { + reply = redisCommand(c, "SET key:%d initial:%d", j, j); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); + + reply = redisCommand(c, "GET key:%d", j); + assertReplyAndFree(c, reply, REDIS_REPLY_STRING); + } + + /* Trigger invalidation messages by updating keys we just read */ + for (j = 0; j < KEY_COUNT; j++) { + printf(" main(): SET key:%d update:%d\n", j, j); + reply = redisCommand(c, "SET key:%d update:%d", j, j); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); + printf(" main(): SET REPLY OK\n"); + } + + printf("\nTotal detected invalidations: %d, expected: %d\n", invalidations, KEY_COUNT); + + /* PING server */ + redisFree(c); +} diff --git a/deps/hiredis/examples/example-qt.cpp b/deps/hiredis/examples/example-qt.cpp new file mode 100644 index 0000000..f524c3f --- /dev/null +++ b/deps/hiredis/examples/example-qt.cpp @@ -0,0 +1,46 @@ +#include +using namespace std; + +#include +#include + +#include "example-qt.h" + +void getCallback(redisAsyncContext *, void * r, void * privdata) { + + redisReply * reply = static_cast(r); + ExampleQt * ex = static_cast(privdata); + if (reply == nullptr || ex == nullptr) return; + + cout << "key: " << reply->str << endl; + + ex->finish(); +} + +void ExampleQt::run() { + + m_ctx = redisAsyncConnect("localhost", 6379); + + if (m_ctx->err) { + cerr << "Error: " << m_ctx->errstr << endl; + redisAsyncFree(m_ctx); + emit finished(); + } + + m_adapter.setContext(m_ctx); + + redisAsyncCommand(m_ctx, NULL, NULL, "SET key %s", m_value); + redisAsyncCommand(m_ctx, getCallback, this, "GET key"); +} + +int main (int argc, char **argv) { + + QCoreApplication app(argc, argv); + + ExampleQt example(argv[argc-1]); + + QObject::connect(&example, SIGNAL(finished()), &app, SLOT(quit())); + QTimer::singleShot(0, &example, SLOT(run())); + + return app.exec(); +} diff --git a/deps/hiredis/examples/example-qt.h b/deps/hiredis/examples/example-qt.h new file mode 100644 index 0000000..374f476 --- /dev/null +++ b/deps/hiredis/examples/example-qt.h @@ -0,0 +1,32 @@ +#ifndef __HIREDIS_EXAMPLE_QT_H +#define __HIREDIS_EXAMPLE_QT_H + +#include + +class ExampleQt : public QObject { + + Q_OBJECT + + public: + ExampleQt(const char * value, QObject * parent = 0) + : QObject(parent), m_value(value) {} + + signals: + void finished(); + + public slots: + void run(); + + private: + void finish() { emit finished(); } + + private: + const char * m_value; + redisAsyncContext * m_ctx; + RedisQtAdapter m_adapter; + + friend + void getCallback(redisAsyncContext *, void *, void *); +}; + +#endif /* !__HIREDIS_EXAMPLE_QT_H */ diff --git a/deps/hiredis/examples/example-ssl.c b/deps/hiredis/examples/example-ssl.c new file mode 100644 index 0000000..b8ca442 --- /dev/null +++ b/deps/hiredis/examples/example-ssl.c @@ -0,0 +1,113 @@ +#include +#include +#include + +#include +#include + +#ifdef _MSC_VER +#include /* For struct timeval */ +#endif + +int main(int argc, char **argv) { + unsigned int j; + redisSSLContext *ssl; + redisSSLContextError ssl_error; + redisContext *c; + redisReply *reply; + if (argc < 4) { + printf("Usage: %s [ca]\n", argv[0]); + exit(1); + } + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + int port = atoi(argv[2]); + const char *cert = argv[3]; + const char *key = argv[4]; + const char *ca = argc > 4 ? 
argv[5] : NULL; + + redisInitOpenSSL(); + ssl = redisCreateSSLContext(ca, NULL, cert, key, NULL, &ssl_error); + if (!ssl) { + printf("SSL Context error: %s\n", + redisSSLContextGetError(ssl_error)); + exit(1); + } + + struct timeval tv = { 1, 500000 }; // 1.5 seconds + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, hostname, port); + options.connect_timeout = &tv; + c = redisConnectWithOptions(&options); + + if (c == NULL || c->err) { + if (c) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + } else { + printf("Connection error: can't allocate redis context\n"); + } + exit(1); + } + + if (redisInitiateSSLWithContext(c, ssl) != REDIS_OK) { + printf("Couldn't initialize SSL!\n"); + printf("Error: %s\n", c->errstr); + redisFree(c); + exit(1); + } + + /* PING server */ + reply = redisCommand(c,"PING"); + printf("PING: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key */ + reply = redisCommand(c,"SET %s %s", "foo", "hello world"); + printf("SET: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key using binary safe API */ + reply = redisCommand(c,"SET %b %b", "bar", (size_t) 3, "hello", (size_t) 5); + printf("SET (binary API): %s\n", reply->str); + freeReplyObject(reply); + + /* Try a GET and two INCR */ + reply = redisCommand(c,"GET foo"); + printf("GET foo: %s\n", reply->str); + freeReplyObject(reply); + + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + /* again ... */ + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + + /* Create a list of numbers, from 0 to 9 */ + reply = redisCommand(c,"DEL mylist"); + freeReplyObject(reply); + for (j = 0; j < 10; j++) { + char buf[64]; + + snprintf(buf,64,"%u",j); + reply = redisCommand(c,"LPUSH mylist element-%s", buf); + freeReplyObject(reply); + } + + /* Let's check what we have inside the list */ + reply = redisCommand(c,"LRANGE mylist 0 -1"); + if (reply->type == REDIS_REPLY_ARRAY) { + for (j = 0; j < reply->elements; j++) { + printf("%u) %s\n", j, reply->element[j]->str); + } + } + freeReplyObject(reply); + + /* Disconnects and frees the context */ + redisFree(c); + + redisFreeSSLContext(ssl); + + return 0; +} diff --git a/deps/hiredis/examples/example.c b/deps/hiredis/examples/example.c new file mode 100644 index 0000000..f1b8b4a --- /dev/null +++ b/deps/hiredis/examples/example.c @@ -0,0 +1,94 @@ +#include +#include +#include +#include + +#ifdef _MSC_VER +#include /* For struct timeval */ +#endif + +int main(int argc, char **argv) { + unsigned int j, isunix = 0; + redisContext *c; + redisReply *reply; + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + + if (argc > 2) { + if (*argv[2] == 'u' || *argv[2] == 'U') { + isunix = 1; + /* in this case, host is the path to the unix socket */ + printf("Will connect to unix socket @%s\n", hostname); + } + } + + int port = (argc > 2) ? 
atoi(argv[2]) : 6379; + + struct timeval timeout = { 1, 500000 }; // 1.5 seconds + if (isunix) { + c = redisConnectUnixWithTimeout(hostname, timeout); + } else { + c = redisConnectWithTimeout(hostname, port, timeout); + } + if (c == NULL || c->err) { + if (c) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + } else { + printf("Connection error: can't allocate redis context\n"); + } + exit(1); + } + + /* PING server */ + reply = redisCommand(c,"PING"); + printf("PING: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key */ + reply = redisCommand(c,"SET %s %s", "foo", "hello world"); + printf("SET: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key using binary safe API */ + reply = redisCommand(c,"SET %b %b", "bar", (size_t) 3, "hello", (size_t) 5); + printf("SET (binary API): %s\n", reply->str); + freeReplyObject(reply); + + /* Try a GET and two INCR */ + reply = redisCommand(c,"GET foo"); + printf("GET foo: %s\n", reply->str); + freeReplyObject(reply); + + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + /* again ... */ + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + + /* Create a list of numbers, from 0 to 9 */ + reply = redisCommand(c,"DEL mylist"); + freeReplyObject(reply); + for (j = 0; j < 10; j++) { + char buf[64]; + + snprintf(buf,64,"%u",j); + reply = redisCommand(c,"LPUSH mylist element-%s", buf); + freeReplyObject(reply); + } + + /* Let's check what we have inside the list */ + reply = redisCommand(c,"LRANGE mylist 0 -1"); + if (reply->type == REDIS_REPLY_ARRAY) { + for (j = 0; j < reply->elements; j++) { + printf("%u) %s\n", j, reply->element[j]->str); + } + } + freeReplyObject(reply); + + /* Disconnects and frees the context */ + redisFree(c); + + return 0; +} diff --git a/deps/hiredis/fmacros.h b/deps/hiredis/fmacros.h new file mode 100644 index 0000000..754a53c --- /dev/null +++ b/deps/hiredis/fmacros.h @@ -0,0 +1,14 @@ +#ifndef __HIREDIS_FMACRO_H +#define __HIREDIS_FMACRO_H + +#ifndef _AIX +#define _XOPEN_SOURCE 600 +#define _POSIX_C_SOURCE 200112L +#endif + +#if defined(__APPLE__) && defined(__MACH__) +/* Enable TCP_KEEPALIVE */ +#define _DARWIN_C_SOURCE +#endif + +#endif diff --git a/deps/hiredis/fuzzing/format_command_fuzzer.c b/deps/hiredis/fuzzing/format_command_fuzzer.c new file mode 100644 index 0000000..91adeac --- /dev/null +++ b/deps/hiredis/fuzzing/format_command_fuzzer.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020, Salvatore Sanfilippo + * Copyright (c) 2020, Pieter Noordhuis + * Copyright (c) 2020, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include "hiredis.h" + +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { + char *new_str, *cmd; + + if (size < 3) + return 0; + + new_str = malloc(size+1); + if (new_str == NULL) + return 0; + + memcpy(new_str, data, size); + new_str[size] = '\0'; + + redisFormatCommand(&cmd, new_str); + + if (cmd != NULL) + hi_free(cmd); + free(new_str); + return 0; +} diff --git a/deps/hiredis/hiredis-config.cmake.in b/deps/hiredis/hiredis-config.cmake.in new file mode 100644 index 0000000..98851dc --- /dev/null +++ b/deps/hiredis/hiredis-config.cmake.in @@ -0,0 +1,13 @@ +@PACKAGE_INIT@ + +set_and_check(hiredis_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +IF (NOT TARGET hiredis::hiredis) + INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis-targets.cmake) +ENDIF() + +SET(hiredis_LIBRARIES hiredis::hiredis) +SET(hiredis_INCLUDE_DIRS ${hiredis_INCLUDEDIR}) + +check_required_components(hiredis) + diff --git a/deps/hiredis/hiredis.c b/deps/hiredis/hiredis.c new file mode 100644 index 0000000..f7b61c2 --- /dev/null +++ b/deps/hiredis/hiredis.c @@ -0,0 +1,1188 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#include +#include +#include + +#include "hiredis.h" +#include "net.h" +#include "sds.h" +#include "async.h" +#include "win32.h" + +extern int redisContextUpdateConnectTimeout(redisContext *c, const struct timeval *timeout); +extern int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout); + +static redisContextFuncs redisContextDefaultFuncs = { + .free_privctx = NULL, + .async_read = redisAsyncRead, + .async_write = redisAsyncWrite, + .read = redisNetRead, + .write = redisNetWrite +}; + +static redisReply *createReplyObject(int type); +static void *createStringObject(const redisReadTask *task, char *str, size_t len); +static void *createArrayObject(const redisReadTask *task, size_t elements); +static void *createIntegerObject(const redisReadTask *task, long long value); +static void *createDoubleObject(const redisReadTask *task, double value, char *str, size_t len); +static void *createNilObject(const redisReadTask *task); +static void *createBoolObject(const redisReadTask *task, int bval); + +/* Default set of functions to build the reply. Keep in mind that such a + * function returning NULL is interpreted as OOM. */ +static redisReplyObjectFunctions defaultFunctions = { + createStringObject, + createArrayObject, + createIntegerObject, + createDoubleObject, + createNilObject, + createBoolObject, + freeReplyObject +}; + +/* Create a reply object */ +static redisReply *createReplyObject(int type) { + redisReply *r = hi_calloc(1,sizeof(*r)); + + if (r == NULL) + return NULL; + + r->type = type; + return r; +} + +/* Free a reply object */ +void freeReplyObject(void *reply) { + redisReply *r = reply; + size_t j; + + if (r == NULL) + return; + + switch(r->type) { + case REDIS_REPLY_INTEGER: + case REDIS_REPLY_NIL: + case REDIS_REPLY_BOOL: + break; /* Nothing to free */ + case REDIS_REPLY_ARRAY: + case REDIS_REPLY_MAP: + case REDIS_REPLY_SET: + case REDIS_REPLY_PUSH: + if (r->element != NULL) { + for (j = 0; j < r->elements; j++) + freeReplyObject(r->element[j]); + hi_free(r->element); + } + break; + case REDIS_REPLY_ERROR: + case REDIS_REPLY_STATUS: + case REDIS_REPLY_STRING: + case REDIS_REPLY_DOUBLE: + case REDIS_REPLY_VERB: + case REDIS_REPLY_BIGNUM: + hi_free(r->str); + break; + } + hi_free(r); +} + +static void *createStringObject(const redisReadTask *task, char *str, size_t len) { + redisReply *r, *parent; + char *buf; + + r = createReplyObject(task->type); + if (r == NULL) + return NULL; + + assert(task->type == REDIS_REPLY_ERROR || + task->type == REDIS_REPLY_STATUS || + task->type == REDIS_REPLY_STRING || + task->type == REDIS_REPLY_VERB || + task->type == REDIS_REPLY_BIGNUM); + + /* Copy string value */ + if (task->type == REDIS_REPLY_VERB) { + buf = hi_malloc(len-4+1); /* Skip 4 bytes of verbatim type header. 
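+          * The verbatim header is a three character format plus ':' (e.g. "txt:").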
*/ + if (buf == NULL) goto oom; + + memcpy(r->vtype,str,3); + r->vtype[3] = '\0'; + memcpy(buf,str+4,len-4); + buf[len-4] = '\0'; + r->len = len - 4; + } else { + buf = hi_malloc(len+1); + if (buf == NULL) goto oom; + + memcpy(buf,str,len); + buf[len] = '\0'; + r->len = len; + } + r->str = buf; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; + +oom: + freeReplyObject(r); + return NULL; +} + +static void *createArrayObject(const redisReadTask *task, size_t elements) { + redisReply *r, *parent; + + r = createReplyObject(task->type); + if (r == NULL) + return NULL; + + if (elements > 0) { + r->element = hi_calloc(elements,sizeof(redisReply*)); + if (r->element == NULL) { + freeReplyObject(r); + return NULL; + } + } + + r->elements = elements; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createIntegerObject(const redisReadTask *task, long long value) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_INTEGER); + if (r == NULL) + return NULL; + + r->integer = value; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createDoubleObject(const redisReadTask *task, double value, char *str, size_t len) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_DOUBLE); + if (r == NULL) + return NULL; + + r->dval = value; + r->str = hi_malloc(len+1); + if (r->str == NULL) { + freeReplyObject(r); + return NULL; + } + + /* The double reply also has the original protocol string representing a + * double as a null terminated string. This way the caller does not need + * to format back for string conversion, especially since Redis does efforts + * to make the string more human readable avoiding the calssical double + * decimal string conversion artifacts. 
*/ + memcpy(r->str, str, len); + r->str[len] = '\0'; + r->len = len; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createNilObject(const redisReadTask *task) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_NIL); + if (r == NULL) + return NULL; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createBoolObject(const redisReadTask *task, int bval) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_BOOL); + if (r == NULL) + return NULL; + + r->integer = bval != 0; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +/* Return the number of digits of 'v' when converted to string in radix 10. + * Implementation borrowed from link in redis/src/util.c:string2ll(). */ +static uint32_t countDigits(uint64_t v) { + uint32_t result = 1; + for (;;) { + if (v < 10) return result; + if (v < 100) return result + 1; + if (v < 1000) return result + 2; + if (v < 10000) return result + 3; + v /= 10000U; + result += 4; + } +} + +/* Helper that calculates the bulk length given a certain string length. */ +static size_t bulklen(size_t len) { + return 1+countDigits(len)+2+len+2; +} + +int redisvFormatCommand(char **target, const char *format, va_list ap) { + const char *c = format; + char *cmd = NULL; /* final command */ + int pos; /* position in final command */ + hisds curarg, newarg; /* current argument */ + int touched = 0; /* was the current argument touched? */ + char **curargv = NULL, **newargv = NULL; + int argc = 0; + int totlen = 0; + int error_type = 0; /* 0 = no error; -1 = memory error; -2 = format error */ + int j; + + /* Abort if there is not target to set */ + if (target == NULL) + return -1; + + /* Build the command string accordingly to protocol */ + curarg = hi_sdsempty(); + if (curarg == NULL) + return -1; + + while(*c != '\0') { + if (*c != '%' || c[1] == '\0') { + if (*c == ' ') { + if (touched) { + newargv = hi_realloc(curargv,sizeof(char*)*(argc+1)); + if (newargv == NULL) goto memory_err; + curargv = newargv; + curargv[argc++] = curarg; + totlen += bulklen(hi_sdslen(curarg)); + + /* curarg is put in argv so it can be overwritten. */ + curarg = hi_sdsempty(); + if (curarg == NULL) goto memory_err; + touched = 0; + } + } else { + newarg = hi_sdscatlen(curarg,c,1); + if (newarg == NULL) goto memory_err; + curarg = newarg; + touched = 1; + } + } else { + char *arg; + size_t size; + + /* Set newarg so it can be checked even if it is not touched. 
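+                 * After the switch below, a NULL newarg always means an
+                 * allocation failure.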
*/ + newarg = curarg; + + switch(c[1]) { + case 's': + arg = va_arg(ap,char*); + size = strlen(arg); + if (size > 0) + newarg = hi_sdscatlen(curarg,arg,size); + break; + case 'b': + arg = va_arg(ap,char*); + size = va_arg(ap,size_t); + if (size > 0) + newarg = hi_sdscatlen(curarg,arg,size); + break; + case '%': + newarg = hi_sdscat(curarg,"%"); + break; + default: + /* Try to detect printf format */ + { + static const char intfmts[] = "diouxX"; + static const char flags[] = "#0-+ "; + char _format[16]; + const char *_p = c+1; + size_t _l = 0; + va_list _cpy; + + /* Flags */ + while (*_p != '\0' && strchr(flags,*_p) != NULL) _p++; + + /* Field width */ + while (*_p != '\0' && isdigit(*_p)) _p++; + + /* Precision */ + if (*_p == '.') { + _p++; + while (*_p != '\0' && isdigit(*_p)) _p++; + } + + /* Copy va_list before consuming with va_arg */ + va_copy(_cpy,ap); + + /* Integer conversion (without modifiers) */ + if (strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); + goto fmt_valid; + } + + /* Double conversion (without modifiers) */ + if (strchr("eEfFgGaA",*_p) != NULL) { + va_arg(ap,double); + goto fmt_valid; + } + + /* Size: char */ + if (_p[0] == 'h' && _p[1] == 'h') { + _p += 2; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); /* char gets promoted to int */ + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: short */ + if (_p[0] == 'h') { + _p += 1; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); /* short gets promoted to int */ + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: long long */ + if (_p[0] == 'l' && _p[1] == 'l') { + _p += 2; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,long long); + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: long */ + if (_p[0] == 'l') { + _p += 1; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,long); + goto fmt_valid; + } + goto fmt_invalid; + } + + fmt_invalid: + va_end(_cpy); + goto format_err; + + fmt_valid: + _l = (_p+1)-c; + if (_l < sizeof(_format)-2) { + memcpy(_format,c,_l); + _format[_l] = '\0'; + newarg = hi_sdscatvprintf(curarg,_format,_cpy); + + /* Update current position (note: outer blocks + * increment c twice so compensate here) */ + c = _p-1; + } + + va_end(_cpy); + break; + } + } + + if (newarg == NULL) goto memory_err; + curarg = newarg; + + touched = 1; + c++; + } + c++; + } + + /* Add the last argument if needed */ + if (touched) { + newargv = hi_realloc(curargv,sizeof(char*)*(argc+1)); + if (newargv == NULL) goto memory_err; + curargv = newargv; + curargv[argc++] = curarg; + totlen += bulklen(hi_sdslen(curarg)); + } else { + hi_sdsfree(curarg); + } + + /* Clear curarg because it was put in curargv or was free'd. 
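+     * Setting it to NULL turns the hi_sdsfree(curarg) in the cleanup path
+     * into a harmless no-op.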
*/ + curarg = NULL; + + /* Add bytes needed to hold multi bulk count */ + totlen += 1+countDigits(argc)+2; + + /* Build the command at protocol level */ + cmd = hi_malloc(totlen+1); + if (cmd == NULL) goto memory_err; + + pos = sprintf(cmd,"*%d\r\n",argc); + for (j = 0; j < argc; j++) { + pos += sprintf(cmd+pos,"$%zu\r\n",hi_sdslen(curargv[j])); + memcpy(cmd+pos,curargv[j],hi_sdslen(curargv[j])); + pos += hi_sdslen(curargv[j]); + hi_sdsfree(curargv[j]); + cmd[pos++] = '\r'; + cmd[pos++] = '\n'; + } + assert(pos == totlen); + cmd[pos] = '\0'; + + hi_free(curargv); + *target = cmd; + return totlen; + +format_err: + error_type = -2; + goto cleanup; + +memory_err: + error_type = -1; + goto cleanup; + +cleanup: + if (curargv) { + while(argc--) + hi_sdsfree(curargv[argc]); + hi_free(curargv); + } + + hi_sdsfree(curarg); + hi_free(cmd); + + return error_type; +} + +/* Format a command according to the Redis protocol. This function + * takes a format similar to printf: + * + * %s represents a C null terminated string you want to interpolate + * %b represents a binary safe string + * + * When using %b you need to provide both the pointer to the string + * and the length in bytes as a size_t. Examples: + * + * len = redisFormatCommand(target, "GET %s", mykey); + * len = redisFormatCommand(target, "SET %s %b", mykey, myval, myvallen); + */ +int redisFormatCommand(char **target, const char *format, ...) { + va_list ap; + int len; + va_start(ap,format); + len = redisvFormatCommand(target,format,ap); + va_end(ap); + + /* The API says "-1" means bad result, but we now also return "-2" in some + * cases. Force the return value to always be -1. */ + if (len < 0) + len = -1; + + return len; +} + +/* Format a command according to the Redis protocol using an hisds string and + * hi_sdscatfmt for the processing of arguments. This function takes the + * number of arguments, an array with arguments and an array with their + * lengths. If the latter is set to NULL, strlen will be used to compute the + * argument lengths. + */ +long long redisFormatSdsCommandArgv(hisds *target, int argc, const char **argv, + const size_t *argvlen) +{ + hisds cmd, aux; + unsigned long long totlen, len; + int j; + + /* Abort on a NULL target */ + if (target == NULL) + return -1; + + /* Calculate our total size */ + totlen = 1+countDigits(argc)+2; + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + totlen += bulklen(len); + } + + /* Use an SDS string for command construction */ + cmd = hi_sdsempty(); + if (cmd == NULL) + return -1; + + /* We already know how much storage we need */ + aux = hi_sdsMakeRoomFor(cmd, totlen); + if (aux == NULL) { + hi_sdsfree(cmd); + return -1; + } + + cmd = aux; + + /* Construct command */ + cmd = hi_sdscatfmt(cmd, "*%i\r\n", argc); + for (j=0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + cmd = hi_sdscatfmt(cmd, "$%U\r\n", len); + cmd = hi_sdscatlen(cmd, argv[j], len); + cmd = hi_sdscatlen(cmd, "\r\n", sizeof("\r\n")-1); + } + + assert(hi_sdslen(cmd)==totlen); + + *target = cmd; + return totlen; +} + +void redisFreeSdsCommand(hisds cmd) { + hi_sdsfree(cmd); +} + +/* Format a command according to the Redis protocol. This function takes the + * number of arguments, an array with arguments and an array with their + * lengths. If the latter is set to NULL, strlen will be used to compute the + * argument lengths. 
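+ *
+ * Example:
+ *
+ *   const char *argv[] = {"SET", "key", "value"};
+ *   len = redisFormatCommandArgv(&cmd, 3, argv, NULL);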
+ */ +long long redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen) { + char *cmd = NULL; /* final command */ + size_t pos; /* position in final command */ + size_t len, totlen; + int j; + + /* Abort on a NULL target */ + if (target == NULL) + return -1; + + /* Calculate number of bytes needed for the command */ + totlen = 1+countDigits(argc)+2; + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + totlen += bulklen(len); + } + + /* Build the command at protocol level */ + cmd = hi_malloc(totlen+1); + if (cmd == NULL) + return -1; + + pos = sprintf(cmd,"*%d\r\n",argc); + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + pos += sprintf(cmd+pos,"$%zu\r\n",len); + memcpy(cmd+pos,argv[j],len); + pos += len; + cmd[pos++] = '\r'; + cmd[pos++] = '\n'; + } + assert(pos == totlen); + cmd[pos] = '\0'; + + *target = cmd; + return totlen; +} + +void redisFreeCommand(char *cmd) { + hi_free(cmd); +} + +void __redisSetError(redisContext *c, int type, const char *str) { + size_t len; + + c->err = type; + if (str != NULL) { + len = strlen(str); + len = len < (sizeof(c->errstr)-1) ? len : (sizeof(c->errstr)-1); + memcpy(c->errstr,str,len); + c->errstr[len] = '\0'; + } else { + /* Only REDIS_ERR_IO may lack a description! */ + assert(type == REDIS_ERR_IO); + strerror_r(errno, c->errstr, sizeof(c->errstr)); + } +} + +redisReader *redisReaderCreate(void) { + return redisReaderCreateWithFunctions(&defaultFunctions); +} + +static void redisPushAutoFree(void *privdata, void *reply) { + (void)privdata; + freeReplyObject(reply); +} + +static redisContext *redisContextInit(void) { + redisContext *c; + + c = hi_calloc(1, sizeof(*c)); + if (c == NULL) + return NULL; + + c->funcs = &redisContextDefaultFuncs; + + c->obuf = hi_sdsempty(); + c->reader = redisReaderCreate(); + c->fd = REDIS_INVALID_FD; + + if (c->obuf == NULL || c->reader == NULL) { + redisFree(c); + return NULL; + } + + return c; +} + +void redisFree(redisContext *c) { + if (c == NULL) + return; + redisNetClose(c); + + hi_sdsfree(c->obuf); + redisReaderFree(c->reader); + hi_free(c->tcp.host); + hi_free(c->tcp.source_addr); + hi_free(c->unix_sock.path); + hi_free(c->connect_timeout); + hi_free(c->command_timeout); + hi_free(c->saddr); + + if (c->privdata && c->free_privdata) + c->free_privdata(c->privdata); + + if (c->funcs->free_privctx) + c->funcs->free_privctx(c->privctx); + + memset(c, 0xff, sizeof(*c)); + hi_free(c); +} + +redisFD redisFreeKeepFd(redisContext *c) { + redisFD fd = c->fd; + c->fd = REDIS_INVALID_FD; + redisFree(c); + return fd; +} + +int redisReconnect(redisContext *c) { + c->err = 0; + memset(c->errstr, '\0', strlen(c->errstr)); + + if (c->privctx && c->funcs->free_privctx) { + c->funcs->free_privctx(c->privctx); + c->privctx = NULL; + } + + redisNetClose(c); + + hi_sdsfree(c->obuf); + redisReaderFree(c->reader); + + c->obuf = hi_sdsempty(); + c->reader = redisReaderCreate(); + + if (c->obuf == NULL || c->reader == NULL) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; + } + + int ret = REDIS_ERR; + if (c->connection_type == REDIS_CONN_TCP) { + ret = redisContextConnectBindTcp(c, c->tcp.host, c->tcp.port, + c->connect_timeout, c->tcp.source_addr); + } else if (c->connection_type == REDIS_CONN_UNIX) { + ret = redisContextConnectUnix(c, c->unix_sock.path, c->connect_timeout); + } else { + /* Something bad happened here and shouldn't have. There isn't + enough information in the context to reconnect. 
*/ + __redisSetError(c,REDIS_ERR_OTHER,"Not enough information to reconnect"); + ret = REDIS_ERR; + } + + if (c->command_timeout != NULL && (c->flags & REDIS_BLOCK) && c->fd != REDIS_INVALID_FD) { + redisContextSetTimeout(c, *c->command_timeout); + } + + return ret; +} + +redisContext *redisConnectWithOptions(const redisOptions *options) { + redisContext *c = redisContextInit(); + if (c == NULL) { + return NULL; + } + if (!(options->options & REDIS_OPT_NONBLOCK)) { + c->flags |= REDIS_BLOCK; + } + if (options->options & REDIS_OPT_REUSEADDR) { + c->flags |= REDIS_REUSEADDR; + } + if (options->options & REDIS_OPT_NOAUTOFREE) { + c->flags |= REDIS_NO_AUTO_FREE; + } + if (options->options & REDIS_OPT_NOAUTOFREEREPLIES) { + c->flags |= REDIS_NO_AUTO_FREE_REPLIES; + } + + /* Set any user supplied RESP3 PUSH handler or use freeReplyObject + * as a default unless specifically flagged that we don't want one. */ + if (options->push_cb != NULL) + redisSetPushCallback(c, options->push_cb); + else if (!(options->options & REDIS_OPT_NO_PUSH_AUTOFREE)) + redisSetPushCallback(c, redisPushAutoFree); + + c->privdata = options->privdata; + c->free_privdata = options->free_privdata; + + if (redisContextUpdateConnectTimeout(c, options->connect_timeout) != REDIS_OK || + redisContextUpdateCommandTimeout(c, options->command_timeout) != REDIS_OK) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return c; + } + + if (options->type == REDIS_CONN_TCP) { + redisContextConnectBindTcp(c, options->endpoint.tcp.ip, + options->endpoint.tcp.port, options->connect_timeout, + options->endpoint.tcp.source_addr); + } else if (options->type == REDIS_CONN_UNIX) { + redisContextConnectUnix(c, options->endpoint.unix_socket, + options->connect_timeout); + } else if (options->type == REDIS_CONN_USERFD) { + c->fd = options->endpoint.fd; + c->flags |= REDIS_CONNECTED; + } else { + redisFree(c); + return NULL; + } + + if (options->command_timeout != NULL && (c->flags & REDIS_BLOCK) && c->fd != REDIS_INVALID_FD) { + redisContextSetTimeout(c, *options->command_timeout); + } + + return c; +} + +/* Connect to a Redis instance. On error the field error in the returned + * context will be set to the return value of the error function. + * When no set of reply functions is given, the default set will be used. 
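+ *
+ * A minimal connection sketch (the address and port are placeholder
+ * assumptions, not part of the original documentation):
+ *
+ *   redisContext *c = redisConnect("127.0.0.1", 6379);
+ *   if (c == NULL || c->err) {
+ *       handle the failure; c->errstr describes it when c is non-NULL
+ *   }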
*/ +redisContext *redisConnect(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.connect_timeout = &tv; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectNonBlock(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectBindNonBlock(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + options.options |= REDIS_OPT_NONBLOCK|REDIS_OPT_REUSEADDR; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnix(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + options.connect_timeout = &tv; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnixNonBlock(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectFd(redisFD fd) { + redisOptions options = {0}; + options.type = REDIS_CONN_USERFD; + options.endpoint.fd = fd; + return redisConnectWithOptions(&options); +} + +/* Set read/write timeout on a blocking socket. */ +int redisSetTimeout(redisContext *c, const struct timeval tv) { + if (c->flags & REDIS_BLOCK) + return redisContextSetTimeout(c,tv); + return REDIS_ERR; +} + +/* Enable connection KeepAlive. */ +int redisEnableKeepAlive(redisContext *c) { + if (redisKeepAlive(c, REDIS_KEEPALIVE_INTERVAL) != REDIS_OK) + return REDIS_ERR; + return REDIS_OK; +} + +/* Set a user provided RESP3 PUSH handler and return any old one set. */ +redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn) { + redisPushFn *old = c->push_cb; + c->push_cb = fn; + return old; +} + +/* Use this function to handle a read event on the descriptor. It will try + * and read some bytes from the socket and feed them to the reply parser. + * + * After this function is called, you may use redisGetReplyFromReader to + * see if there is a reply available. */ +int redisBufferRead(redisContext *c) { + char buf[1024*16]; + int nread; + + /* Return early when the context has seen an error. */ + if (c->err) + return REDIS_ERR; + + nread = c->funcs->read(c, buf, sizeof(buf)); + if (nread < 0) { + return REDIS_ERR; + } + if (nread > 0 && redisReaderFeed(c->reader, buf, nread) != REDIS_OK) { + __redisSetError(c, c->reader->err, c->reader->errstr); + return REDIS_ERR; + } + return REDIS_OK; +} + +/* Write the output buffer to the socket. 
+ * + * Returns REDIS_OK when the buffer is empty, or (a part of) the buffer was + * successfully written to the socket. When the buffer is empty after the + * write operation, "done" is set to 1 (if given). + * + * Returns REDIS_ERR if an error occurred trying to write and sets + * c->errstr to hold the appropriate error string. + */ +int redisBufferWrite(redisContext *c, int *done) { + + /* Return early when the context has seen an error. */ + if (c->err) + return REDIS_ERR; + + if (hi_sdslen(c->obuf) > 0) { + ssize_t nwritten = c->funcs->write(c); + if (nwritten < 0) { + return REDIS_ERR; + } else if (nwritten > 0) { + if (nwritten == (ssize_t)hi_sdslen(c->obuf)) { + hi_sdsfree(c->obuf); + c->obuf = hi_sdsempty(); + if (c->obuf == NULL) + goto oom; + } else { + if (hi_sdsrange(c->obuf,nwritten,-1) < 0) goto oom; + } + } + } + if (done != NULL) *done = (hi_sdslen(c->obuf) == 0); + return REDIS_OK; + +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} + +/* Internal helper that returns 1 if the reply was a RESP3 PUSH + * message and we handled it with a user-provided callback. */ +static int redisHandledPushReply(redisContext *c, void *reply) { + if (reply && c->push_cb && redisIsPushReply(reply)) { + c->push_cb(c->privdata, reply); + return 1; + } + + return 0; +} + +/* Get a reply from our reader or set an error in the context. */ +int redisGetReplyFromReader(redisContext *c, void **reply) { + if (redisReaderGetReply(c->reader, reply) == REDIS_ERR) { + __redisSetError(c,c->reader->err,c->reader->errstr); + return REDIS_ERR; + } + + return REDIS_OK; +} + +/* Internal helper to get the next reply from our reader while handling + * any PUSH messages we encounter along the way. This is separate from + * redisGetReplyFromReader so as to not change its behavior. */ +static int redisNextInBandReplyFromReader(redisContext *c, void **reply) { + do { + if (redisGetReplyFromReader(c, reply) == REDIS_ERR) + return REDIS_ERR; + } while (redisHandledPushReply(c, *reply)); + + return REDIS_OK; +} + +int redisGetReply(redisContext *c, void **reply) { + int wdone = 0; + void *aux = NULL; + + /* Try to read pending replies */ + if (redisNextInBandReplyFromReader(c,&aux) == REDIS_ERR) + return REDIS_ERR; + + /* For the blocking context, flush output buffer and read reply */ + if (aux == NULL && c->flags & REDIS_BLOCK) { + /* Write until done */ + do { + if (redisBufferWrite(c,&wdone) == REDIS_ERR) + return REDIS_ERR; + } while (!wdone); + + /* Read until there is a reply */ + do { + if (redisBufferRead(c) == REDIS_ERR) + return REDIS_ERR; + + if (redisNextInBandReplyFromReader(c,&aux) == REDIS_ERR) + return REDIS_ERR; + } while (aux == NULL); + } + + /* Set reply or free it if we were passed NULL */ + if (reply != NULL) { + *reply = aux; + } else { + freeReplyObject(aux); + } + + return REDIS_OK; +} + + +/* Helper function for the redisAppendCommand* family of functions. + * + * Write a formatted command to the output buffer. When this family + * is used, you need to call redisGetReply yourself to retrieve + * the reply (or replies in pub/sub). 
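+ *
+ * For example, a pipelining sketch using the public wrappers (the key and
+ * value are illustrative placeholders):
+ *
+ *   void *reply;
+ *   redisAppendCommand(c, "SET %s %s", "key", "value");
+ *   redisAppendCommand(c, "GET %s", "key");
+ *   redisGetReply(c, &reply);      (reply to SET)
+ *   freeReplyObject(reply);
+ *   redisGetReply(c, &reply);      (reply to GET)
+ *   freeReplyObject(reply);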
+ */ +int __redisAppendCommand(redisContext *c, const char *cmd, size_t len) { + hisds newbuf; + + newbuf = hi_sdscatlen(c->obuf,cmd,len); + if (newbuf == NULL) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } + + c->obuf = newbuf; + return REDIS_OK; +} + +int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len) { + + if (__redisAppendCommand(c, cmd, len) != REDIS_OK) { + return REDIS_ERR; + } + + return REDIS_OK; +} + +int redisvAppendCommand(redisContext *c, const char *format, va_list ap) { + char *cmd; + int len; + + len = redisvFormatCommand(&cmd,format,ap); + if (len == -1) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } else if (len == -2) { + __redisSetError(c,REDIS_ERR_OTHER,"Invalid format string"); + return REDIS_ERR; + } + + if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { + hi_free(cmd); + return REDIS_ERR; + } + + hi_free(cmd); + return REDIS_OK; +} + +int redisAppendCommand(redisContext *c, const char *format, ...) { + va_list ap; + int ret; + + va_start(ap,format); + ret = redisvAppendCommand(c,format,ap); + va_end(ap); + return ret; +} + +int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) { + hisds cmd; + long long len; + + len = redisFormatSdsCommandArgv(&cmd,argc,argv,argvlen); + if (len == -1) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } + + if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { + hi_sdsfree(cmd); + return REDIS_ERR; + } + + hi_sdsfree(cmd); + return REDIS_OK; +} + +/* Helper function for the redisCommand* family of functions. + * + * Write a formatted command to the output buffer. If the given context is + * blocking, immediately read the reply into the "reply" pointer. When the + * context is non-blocking, the "reply" pointer will not be used and the + * command is simply appended to the write buffer. + * + * Returns the reply when a reply was successfully retrieved. Returns NULL + * otherwise. When NULL is returned in a blocking context, the error field + * in the context will be set. + */ +static void *__redisBlockForReply(redisContext *c) { + void *reply; + + if (c->flags & REDIS_BLOCK) { + if (redisGetReply(c,&reply) != REDIS_OK) + return NULL; + return reply; + } + return NULL; +} + +void *redisvCommand(redisContext *c, const char *format, va_list ap) { + if (redisvAppendCommand(c,format,ap) != REDIS_OK) + return NULL; + return __redisBlockForReply(c); +} + +void *redisCommand(redisContext *c, const char *format, ...) { + va_list ap; + va_start(ap,format); + void *reply = redisvCommand(c,format,ap); + va_end(ap); + return reply; +} + +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) { + if (redisAppendCommandArgv(c,argc,argv,argvlen) != REDIS_OK) + return NULL; + return __redisBlockForReply(c); +} diff --git a/deps/hiredis/hiredis.h b/deps/hiredis/hiredis.h new file mode 100644 index 0000000..9c65901 --- /dev/null +++ b/deps/hiredis/hiredis.h @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ *   to endorse or promote products derived from this software without
+ *   specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIREDIS_H
+#define __HIREDIS_H
+#include "read.h"
+#include <stdarg.h> /* for va_list */
+#ifndef _MSC_VER
+#include <sys/time.h> /* for struct timeval */
+#else
+struct timeval; /* forward declaration */
+typedef long long ssize_t;
+#endif
+#include <stdint.h> /* uintXX_t, etc */
+#include "sds.h" /* for hisds */
+#include "alloc.h" /* for allocation wrappers */
+
+#define HIREDIS_MAJOR 1
+#define HIREDIS_MINOR 0
+#define HIREDIS_PATCH 3
+#define HIREDIS_SONAME 1.0.3-dev
+
+/* Connection type can be blocking or non-blocking and is set in the
+ * least significant bit of the flags field in redisContext. */
+#define REDIS_BLOCK 0x1
+
+/* Connection may be disconnected before being free'd. The second bit
+ * in the flags field is set when the context is connected. */
+#define REDIS_CONNECTED 0x2
+
+/* The async API might try to disconnect cleanly and flush the output
+ * buffer and read all subsequent replies before disconnecting.
+ * This flag means no new commands can come in and the connection
+ * should be terminated once all replies have been read. */
+#define REDIS_DISCONNECTING 0x4
+
+/* Flag specific to the async API which means that the context should be
+ * cleaned up as soon as possible. */
+#define REDIS_FREEING 0x8
+
+/* Flag that is set when an async callback is executed. */
+#define REDIS_IN_CALLBACK 0x10
+
+/* Flag that is set when the async context has one or more subscriptions. */
+#define REDIS_SUBSCRIBED 0x20
+
+/* Flag that is set when monitor mode is active */
+#define REDIS_MONITORING 0x40
+
+/* Flag that is set when we should set SO_REUSEADDR before calling bind() */
+#define REDIS_REUSEADDR 0x80
+
+/* Flag that is set when the async connection supports push replies. */
+#define REDIS_SUPPORTS_PUSH 0x100
+
+/**
+ * Flag that indicates the user does not want the context to
+ * be automatically freed upon error
+ */
+#define REDIS_NO_AUTO_FREE 0x200
+
+/* Flag that indicates the user does not want replies to be automatically freed */
+#define REDIS_NO_AUTO_FREE_REPLIES 0x400
+
+#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */
+
+/* Number of times we retry to connect in the case of EADDRNOTAVAIL and
+ * SO_REUSEADDR is being used.
*/ +#define REDIS_CONNECT_RETRIES 10 + +/* Forward declarations for structs defined elsewhere */ +struct redisAsyncContext; +struct redisContext; + +/* RESP3 push helpers and callback prototypes */ +#define redisIsPushReply(r) (((redisReply*)(r))->type == REDIS_REPLY_PUSH) +typedef void (redisPushFn)(void *, void *); +typedef void (redisAsyncPushFn)(struct redisAsyncContext *, void *); + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the reply object returned by redisCommand() */ +typedef struct redisReply { + int type; /* REDIS_REPLY_* */ + long long integer; /* The integer when type is REDIS_REPLY_INTEGER */ + double dval; /* The double when type is REDIS_REPLY_DOUBLE */ + size_t len; /* Length of string */ + char *str; /* Used for REDIS_REPLY_ERROR, REDIS_REPLY_STRING + REDIS_REPLY_VERB, REDIS_REPLY_DOUBLE (in additional to dval), + and REDIS_REPLY_BIGNUM. */ + char vtype[4]; /* Used for REDIS_REPLY_VERB, contains the null + terminated 3 character content type, such as "txt". */ + size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */ + struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */ +} redisReply; + +redisReader *redisReaderCreate(void); + +/* Function to free the reply objects hiredis returns by default. */ +void freeReplyObject(void *reply); + +/* Functions to format a command according to the protocol. */ +int redisvFormatCommand(char **target, const char *format, va_list ap); +int redisFormatCommand(char **target, const char *format, ...); +long long redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen); +long long redisFormatSdsCommandArgv(hisds *target, int argc, const char ** argv, const size_t *argvlen); +void redisFreeCommand(char *cmd); +void redisFreeSdsCommand(hisds cmd); + +enum redisConnectionType { + REDIS_CONN_TCP, + REDIS_CONN_UNIX, + REDIS_CONN_USERFD +}; + +struct redisSsl; + +#define REDIS_OPT_NONBLOCK 0x01 +#define REDIS_OPT_REUSEADDR 0x02 + +/** + * Don't automatically free the async object on a connection failure, + * or other implicit conditions. Only free on an explicit call to disconnect() or free() + */ +#define REDIS_OPT_NOAUTOFREE 0x04 + +/* Don't automatically intercept and free RESP3 PUSH replies. */ +#define REDIS_OPT_NO_PUSH_AUTOFREE 0x08 + +/** + * Don't automatically free replies + */ +#define REDIS_OPT_NOAUTOFREEREPLIES 0x10 + +/* In Unix systems a file descriptor is a regular signed int, with -1 + * representing an invalid descriptor. In Windows it is a SOCKET + * (32- or 64-bit unsigned integer depending on the architecture), where + * all bits set (~0) is INVALID_SOCKET. */ +#ifndef _WIN32 +typedef int redisFD; +#define REDIS_INVALID_FD -1 +#else +#ifdef _WIN64 +typedef unsigned long long redisFD; /* SOCKET = 64-bit UINT_PTR */ +#else +typedef unsigned long redisFD; /* SOCKET = 32-bit UINT_PTR */ +#endif +#define REDIS_INVALID_FD ((redisFD)(~0)) /* INVALID_SOCKET */ +#endif + +typedef struct { + /* + * the type of connection to use. This also indicates which + * `endpoint` member field to use + */ + int type; + /* bit field of REDIS_OPT_xxx */ + int options; + /* timeout value for connect operation. If NULL, no timeout is used */ + const struct timeval *connect_timeout; + /* timeout value for commands. If NULL, no timeout is used. This can be + * updated at runtime with redisSetTimeout/redisAsyncSetTimeout. 
*/ + const struct timeval *command_timeout; + union { + /** use this field for tcp/ip connections */ + struct { + const char *source_addr; + const char *ip; + int port; + } tcp; + /** use this field for unix domain sockets */ + const char *unix_socket; + /** + * use this field to have hiredis operate an already-open + * file descriptor */ + redisFD fd; + } endpoint; + + /* Optional user defined data/destructor */ + void *privdata; + void (*free_privdata)(void *); + + /* A user defined PUSH message callback */ + redisPushFn *push_cb; + redisAsyncPushFn *async_push_cb; +} redisOptions; + +/** + * Helper macros to initialize options to their specified fields. + */ +#define REDIS_OPTIONS_SET_TCP(opts, ip_, port_) \ + (opts)->type = REDIS_CONN_TCP; \ + (opts)->endpoint.tcp.ip = ip_; \ + (opts)->endpoint.tcp.port = port_; + +#define REDIS_OPTIONS_SET_UNIX(opts, path) \ + (opts)->type = REDIS_CONN_UNIX; \ + (opts)->endpoint.unix_socket = path; + +#define REDIS_OPTIONS_SET_PRIVDATA(opts, data, dtor) \ + (opts)->privdata = data; \ + (opts)->free_privdata = dtor; \ + +typedef struct redisContextFuncs { + void (*free_privctx)(void *); + void (*async_read)(struct redisAsyncContext *); + void (*async_write)(struct redisAsyncContext *); + ssize_t (*read)(struct redisContext *, char *, size_t); + ssize_t (*write)(struct redisContext *); +} redisContextFuncs; + +/* Context for a connection to Redis */ +typedef struct redisContext { + const redisContextFuncs *funcs; /* Function table */ + + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + redisFD fd; + int flags; + char *obuf; /* Write buffer */ + redisReader *reader; /* Protocol reader */ + + enum redisConnectionType connection_type; + struct timeval *connect_timeout; + struct timeval *command_timeout; + + struct { + char *host; + char *source_addr; + int port; + } tcp; + + struct { + char *path; + } unix_sock; + + /* For non-blocking connect */ + struct sockaddr *saddr; + size_t addrlen; + + /* Optional data and corresponding destructor users can use to provide + * context to a given redisContext. Not used by hiredis. */ + void *privdata; + void (*free_privdata)(void *); + + /* Internal context pointer presently used by hiredis to manage + * SSL connections. */ + void *privctx; + + /* An optional RESP3 PUSH handler */ + redisPushFn *push_cb; +} redisContext; + +redisContext *redisConnectWithOptions(const redisOptions *options); +redisContext *redisConnect(const char *ip, int port); +redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv); +redisContext *redisConnectNonBlock(const char *ip, int port); +redisContext *redisConnectBindNonBlock(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectUnix(const char *path); +redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv); +redisContext *redisConnectUnixNonBlock(const char *path); +redisContext *redisConnectFd(redisFD fd); + +/** + * Reconnect the given context using the saved information. + * + * This re-uses the exact same connect options as in the initial connection. + * host, ip (or path), timeout and bind address are reused, + * flags are used unmodified from the existing context. + * + * Returns REDIS_OK on successful connect or REDIS_ERR otherwise. 
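+ *
+ * A minimal sketch (assumes the context was created earlier with
+ * redisConnect or redisConnectWithOptions):
+ *
+ *   if (c->err && redisReconnect(c) != REDIS_OK) {
+ *       still disconnected; c->err and c->errstr describe the problem
+ *   }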
+ */ +int redisReconnect(redisContext *c); + +redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn); +int redisSetTimeout(redisContext *c, const struct timeval tv); +int redisEnableKeepAlive(redisContext *c); +void redisFree(redisContext *c); +redisFD redisFreeKeepFd(redisContext *c); +int redisBufferRead(redisContext *c); +int redisBufferWrite(redisContext *c, int *done); + +/* In a blocking context, this function first checks if there are unconsumed + * replies to return and returns one if so. Otherwise, it flushes the output + * buffer to the socket and reads until it has a reply. In a non-blocking + * context, it will return unconsumed replies until there are no more. */ +int redisGetReply(redisContext *c, void **reply); +int redisGetReplyFromReader(redisContext *c, void **reply); + +/* Write a formatted command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len); + +/* Write a command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisvAppendCommand(redisContext *c, const char *format, va_list ap); +int redisAppendCommand(redisContext *c, const char *format, ...); +int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +/* Issue a command to Redis. In a blocking context, it is identical to calling + * redisAppendCommand, followed by redisGetReply. The function will return + * NULL if there was an error in performing the request, otherwise it will + * return the reply. In a non-blocking context, it is identical to calling + * only redisAppendCommand and will always return NULL. */ +void *redisvCommand(redisContext *c, const char *format, va_list ap); +void *redisCommand(redisContext *c, const char *format, ...); +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/deps/hiredis/hiredis.pc.in b/deps/hiredis/hiredis.pc.in new file mode 100644 index 0000000..91b7731 --- /dev/null +++ b/deps/hiredis/hiredis.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +install_libdir=@CMAKE_INSTALL_LIBDIR@ +exec_prefix=${prefix} +libdir=${exec_prefix}/${install_libdir} +includedir=${prefix}/include +pkgincludedir=${includedir}/hiredis + +Name: hiredis +Description: Minimalistic C client library for Redis. 
+Version: @PROJECT_VERSION@ +Libs: -L${libdir} -lhiredis +Cflags: -I${pkgincludedir} -D_FILE_OFFSET_BITS=64 diff --git a/deps/hiredis/hiredis.targets b/deps/hiredis/hiredis.targets new file mode 100644 index 0000000..effd8a5 --- /dev/null +++ b/deps/hiredis/hiredis.targets @@ -0,0 +1,11 @@ + + + + + $(MSBuildThisFileDirectory)\..\..\include;%(AdditionalIncludeDirectories) + + + $(MSBuildThisFileDirectory)\..\..\lib;%(AdditionalLibraryDirectories) + + + \ No newline at end of file diff --git a/deps/hiredis/hiredis_ssl-config.cmake.in b/deps/hiredis/hiredis_ssl-config.cmake.in new file mode 100644 index 0000000..9a283df --- /dev/null +++ b/deps/hiredis/hiredis_ssl-config.cmake.in @@ -0,0 +1,13 @@ +@PACKAGE_INIT@ + +set_and_check(hiredis_ssl_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +IF (NOT TARGET hiredis::hiredis_ssl) + INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis_ssl-targets.cmake) +ENDIF() + +SET(hiredis_ssl_LIBRARIES hiredis::hiredis_ssl) +SET(hiredis_ssl_INCLUDE_DIRS ${hiredis_ssl_INCLUDEDIR}) + +check_required_components(hiredis_ssl) + diff --git a/deps/hiredis/hiredis_ssl.h b/deps/hiredis/hiredis_ssl.h new file mode 100644 index 0000000..50fb77c --- /dev/null +++ b/deps/hiredis/hiredis_ssl.h @@ -0,0 +1,129 @@ + +/* + * Copyright (c) 2019, Redis Labs + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_SSL_H +#define __HIREDIS_SSL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the underlying struct for SSL in ssl.h, which is not included to + * keep build dependencies short here. + */ +struct ssl_st; + +/* A wrapper around OpenSSL SSL_CTX to allow easy SSL use without directly + * calling OpenSSL. + */ +typedef struct redisSSLContext redisSSLContext; + +/** + * Initialization errors that redisCreateSSLContext() may return. 
+ */ + +typedef enum { + REDIS_SSL_CTX_NONE = 0, /* No Error */ + REDIS_SSL_CTX_CREATE_FAILED, /* Failed to create OpenSSL SSL_CTX */ + REDIS_SSL_CTX_CERT_KEY_REQUIRED, /* Client cert and key must both be specified or skipped */ + REDIS_SSL_CTX_CA_CERT_LOAD_FAILED, /* Failed to load CA Certificate or CA Path */ + REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED, /* Failed to load client certificate */ + REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED, /* Failed to load private key */ + REDIS_SSL_CTX_OS_CERTSTORE_OPEN_FAILED, /* Failed to open system certificate store */ + REDIS_SSL_CTX_OS_CERT_ADD_FAILED /* Failed to add CA certificates obtained from system to the SSL context */ +} redisSSLContextError; + +/** + * Return the error message corresponding with the specified error code. + */ + +const char *redisSSLContextGetError(redisSSLContextError error); + +/** + * Helper function to initialize the OpenSSL library. + * + * OpenSSL requires one-time initialization before it can be used. Callers should + * call this function only once, and only if OpenSSL is not directly initialized + * elsewhere. + */ +int redisInitOpenSSL(void); + +/** + * Helper function to initialize an OpenSSL context that can be used + * to initiate SSL connections. + * + * cacert_filename is an optional name of a CA certificate/bundle file to load + * and use for validation. + * + * capath is an optional directory path where trusted CA certificate files are + * stored in an OpenSSL-compatible structure. + * + * cert_filename and private_key_filename are optional names of a client side + * certificate and private key files to use for authentication. They need to + * be both specified or omitted. + * + * server_name is an optional and will be used as a server name indication + * (SNI) TLS extension. + * + * If error is non-null, it will be populated in case the context creation fails + * (returning a NULL). + */ + +redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *capath, + const char *cert_filename, const char *private_key_filename, + const char *server_name, redisSSLContextError *error); + +/** + * Free a previously created OpenSSL context. + */ +void redisFreeSSLContext(redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL on an existing redisContext. + * + * This is similar to redisInitiateSSL() but does not require the caller + * to directly interact with OpenSSL, and instead uses a redisSSLContext + * previously created using redisCreateSSLContext(). + */ + +int redisInitiateSSLWithContext(redisContext *c, redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL/TLS negotiation on a provided OpenSSL SSL object. + */ + +int redisInitiateSSL(redisContext *c, struct ssl_st *ssl); + +#ifdef __cplusplus +} +#endif + +#endif /* __HIREDIS_SSL_H */ diff --git a/deps/hiredis/hiredis_ssl.pc.in b/deps/hiredis/hiredis_ssl.pc.in new file mode 100644 index 0000000..588a978 --- /dev/null +++ b/deps/hiredis/hiredis_ssl.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include +pkgincludedir=${includedir}/hiredis + +Name: hiredis_ssl +Description: SSL Support for hiredis. +Version: @PROJECT_VERSION@ +Requires: hiredis +Libs: -L${libdir} -lhiredis_ssl +Libs.private: -lssl -lcrypto diff --git a/deps/hiredis/net.c b/deps/hiredis/net.c new file mode 100644 index 0000000..88f9aff --- /dev/null +++ b/deps/hiredis/net.c @@ -0,0 +1,612 @@ +/* Extracted from anet.c to work properly with Hiredis error reporting. 
+ * + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net.h" +#include "sds.h" +#include "sockcompat.h" +#include "win32.h" + +/* Defined in hiredis.c */ +void __redisSetError(redisContext *c, int type, const char *str); + +void redisNetClose(redisContext *c) { + if (c && c->fd != REDIS_INVALID_FD) { + close(c->fd); + c->fd = REDIS_INVALID_FD; + } +} + +ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap) { + ssize_t nread = recv(c->fd, buf, bufcap, 0); + if (nread == -1) { + if ((errno == EWOULDBLOCK && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) { + /* Try again later */ + return 0; + } else if(errno == ETIMEDOUT && (c->flags & REDIS_BLOCK)) { + /* especially in windows */ + __redisSetError(c, REDIS_ERR_TIMEOUT, "recv timeout"); + return -1; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } else if (nread == 0) { + __redisSetError(c, REDIS_ERR_EOF, "Server closed the connection"); + return -1; + } else { + return nread; + } +} + +ssize_t redisNetWrite(redisContext *c) { + ssize_t nwritten = send(c->fd, c->obuf, hi_sdslen(c->obuf), 0); + if (nwritten < 0) { + if ((errno == EWOULDBLOCK && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) { + /* Try again later */ + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } + return nwritten; +} + +static void __redisSetErrorFromErrno(redisContext *c, int type, const char *prefix) { + int errorno = errno; /* snprintf() may change errno */ + char buf[128] = { 0 }; + size_t len = 0; + + if (prefix != NULL) + len = snprintf(buf,sizeof(buf),"%s: ",prefix); + strerror_r(errorno, (char *)(buf + len), sizeof(buf) - len); + __redisSetError(c,type,buf); +} + +static int redisSetReuseAddr(redisContext *c) { + int on = 1; + if (setsockopt(c->fd, SOL_SOCKET, SO_REUSEADDR, &on, 
sizeof(on)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; + } + return REDIS_OK; +} + +static int redisCreateSocket(redisContext *c, int type) { + redisFD s; + if ((s = socket(type, SOCK_STREAM, 0)) == REDIS_INVALID_FD) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + return REDIS_ERR; + } + c->fd = s; + if (type == AF_INET) { + if (redisSetReuseAddr(c) == REDIS_ERR) { + return REDIS_ERR; + } + } + return REDIS_OK; +} + +static int redisSetBlocking(redisContext *c, int blocking) { +#ifndef _WIN32 + int flags; + + /* Set the socket nonblocking. + * Note that fcntl(2) for F_GETFL and F_SETFL can't be + * interrupted by a signal. */ + if ((flags = fcntl(c->fd, F_GETFL)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_GETFL)"); + redisNetClose(c); + return REDIS_ERR; + } + + if (blocking) + flags &= ~O_NONBLOCK; + else + flags |= O_NONBLOCK; + + if (fcntl(c->fd, F_SETFL, flags) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_SETFL)"); + redisNetClose(c); + return REDIS_ERR; + } +#else + u_long mode = blocking ? 0 : 1; + if (ioctl(c->fd, FIONBIO, &mode) == -1) { + __redisSetErrorFromErrno(c, REDIS_ERR_IO, "ioctl(FIONBIO)"); + redisNetClose(c); + return REDIS_ERR; + } +#endif /* _WIN32 */ + return REDIS_OK; +} + +int redisKeepAlive(redisContext *c, int interval) { + int val = 1; + redisFD fd = c->fd; + + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1){ + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = interval; + +#if defined(__APPLE__) && defined(__MACH__) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } +#else +#if defined(__GLIBC__) && !defined(__FreeBSD_kernel__) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = interval/3; + if (val == 0) val = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = 3; + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } +#endif +#endif + + return REDIS_OK; +} + +int redisSetTcpNoDelay(redisContext *c) { + int yes = 1; + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(TCP_NODELAY)"); + redisNetClose(c); + return REDIS_ERR; + } + return REDIS_OK; +} + +#define __MAX_MSEC (((LONG_MAX) - 999) / 1000) + +static int redisContextTimeoutMsec(redisContext *c, long *result) +{ + const struct timeval *timeout = c->connect_timeout; + long msec = -1; + + /* Only use timeout when not NULL. 
*/ + if (timeout != NULL) { + if (timeout->tv_usec > 1000000 || timeout->tv_sec > __MAX_MSEC) { + *result = msec; + return REDIS_ERR; + } + + msec = (timeout->tv_sec * 1000) + ((timeout->tv_usec + 999) / 1000); + + if (msec < 0 || msec > INT_MAX) { + msec = INT_MAX; + } + } + + *result = msec; + return REDIS_OK; +} + +static int redisContextWaitReady(redisContext *c, long msec) { + struct pollfd wfd[1]; + + wfd[0].fd = c->fd; + wfd[0].events = POLLOUT; + + if (errno == EINPROGRESS) { + int res; + + if ((res = poll(wfd, 1, msec)) == -1) { + __redisSetErrorFromErrno(c, REDIS_ERR_IO, "poll(2)"); + redisNetClose(c); + return REDIS_ERR; + } else if (res == 0) { + errno = ETIMEDOUT; + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; + } + + if (redisCheckConnectDone(c, &res) != REDIS_OK || res == 0) { + redisCheckSocketError(c); + return REDIS_ERR; + } + + return REDIS_OK; + } + + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; +} + +int redisCheckConnectDone(redisContext *c, int *completed) { + int rc = connect(c->fd, (const struct sockaddr *)c->saddr, c->addrlen); + if (rc == 0) { + *completed = 1; + return REDIS_OK; + } + switch (errno) { + case EISCONN: + *completed = 1; + return REDIS_OK; + case EALREADY: + case EINPROGRESS: + case EWOULDBLOCK: + *completed = 0; + return REDIS_OK; + default: + return REDIS_ERR; + } +} + +int redisCheckSocketError(redisContext *c) { + int err = 0, errno_saved = errno; + socklen_t errlen = sizeof(err); + + if (getsockopt(c->fd, SOL_SOCKET, SO_ERROR, &err, &errlen) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"getsockopt(SO_ERROR)"); + return REDIS_ERR; + } + + if (err == 0) { + err = errno_saved; + } + + if (err) { + errno = err; + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + return REDIS_ERR; + } + + return REDIS_OK; +} + +int redisContextSetTimeout(redisContext *c, const struct timeval tv) { + const void *to_ptr = &tv; + size_t to_sz = sizeof(tv); + + if (setsockopt(c->fd,SOL_SOCKET,SO_RCVTIMEO,to_ptr,to_sz) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_RCVTIMEO)"); + return REDIS_ERR; + } + if (setsockopt(c->fd,SOL_SOCKET,SO_SNDTIMEO,to_ptr,to_sz) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_SNDTIMEO)"); + return REDIS_ERR; + } + return REDIS_OK; +} + +int redisContextUpdateConnectTimeout(redisContext *c, const struct timeval *timeout) { + /* Same timeval struct, short circuit */ + if (c->connect_timeout == timeout) + return REDIS_OK; + + /* Allocate context timeval if we need to */ + if (c->connect_timeout == NULL) { + c->connect_timeout = hi_malloc(sizeof(*c->connect_timeout)); + if (c->connect_timeout == NULL) + return REDIS_ERR; + } + + memcpy(c->connect_timeout, timeout, sizeof(*c->connect_timeout)); + return REDIS_OK; +} + +int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout) { + /* Same timeval struct, short circuit */ + if (c->command_timeout == timeout) + return REDIS_OK; + + /* Allocate context timeval if we need to */ + if (c->command_timeout == NULL) { + c->command_timeout = hi_malloc(sizeof(*c->command_timeout)); + if (c->command_timeout == NULL) + return REDIS_ERR; + } + + memcpy(c->command_timeout, timeout, sizeof(*c->command_timeout)); + return REDIS_OK; +} + +static int _redisContextConnectTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr) { + redisFD s; + int rv, n; + char _port[6]; /* strlen("65535"); */ + struct addrinfo 
hints, *servinfo, *bservinfo, *p, *b; + int blocking = (c->flags & REDIS_BLOCK); + int reuseaddr = (c->flags & REDIS_REUSEADDR); + int reuses = 0; + long timeout_msec = -1; + + servinfo = NULL; + c->connection_type = REDIS_CONN_TCP; + c->tcp.port = port; + + /* We need to take possession of the passed parameters + * to make them reusable for a reconnect. + * We also carefully check we don't free data we already own, + * as in the case of the reconnect method. + * + * This is a bit ugly, but atleast it works and doesn't leak memory. + **/ + if (c->tcp.host != addr) { + hi_free(c->tcp.host); + + c->tcp.host = hi_strdup(addr); + if (c->tcp.host == NULL) + goto oom; + } + + if (timeout) { + if (redisContextUpdateConnectTimeout(c, timeout) == REDIS_ERR) + goto oom; + } else { + hi_free(c->connect_timeout); + c->connect_timeout = NULL; + } + + if (redisContextTimeoutMsec(c, &timeout_msec) != REDIS_OK) { + __redisSetError(c, REDIS_ERR_IO, "Invalid timeout specified"); + goto error; + } + + if (source_addr == NULL) { + hi_free(c->tcp.source_addr); + c->tcp.source_addr = NULL; + } else if (c->tcp.source_addr != source_addr) { + hi_free(c->tcp.source_addr); + c->tcp.source_addr = hi_strdup(source_addr); + } + + snprintf(_port, 6, "%d", port); + memset(&hints,0,sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + /* Try with IPv6 if no IPv4 address was found. We do it in this order since + * in a Redis client you can't afford to test if you have IPv6 connectivity + * as this would add latency to every connect. Otherwise a more sensible + * route could be: Use IPv6 if both addresses are available and there is IPv6 + * connectivity. */ + if ((rv = getaddrinfo(c->tcp.host,_port,&hints,&servinfo)) != 0) { + hints.ai_family = AF_INET6; + if ((rv = getaddrinfo(addr,_port,&hints,&servinfo)) != 0) { + __redisSetError(c,REDIS_ERR_OTHER,gai_strerror(rv)); + return REDIS_ERR; + } + } + for (p = servinfo; p != NULL; p = p->ai_next) { +addrretry: + if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == REDIS_INVALID_FD) + continue; + + c->fd = s; + if (redisSetBlocking(c,0) != REDIS_OK) + goto error; + if (c->tcp.source_addr) { + int bound = 0; + /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ + if ((rv = getaddrinfo(c->tcp.source_addr, NULL, &hints, &bservinfo)) != 0) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't get addr: %s",gai_strerror(rv)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + + if (reuseaddr) { + n = 1; + if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char*) &n, + sizeof(n)) < 0) { + freeaddrinfo(bservinfo); + goto error; + } + } + + for (b = bservinfo; b != NULL; b = b->ai_next) { + if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { + bound = 1; + break; + } + } + freeaddrinfo(bservinfo); + if (!bound) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't bind socket: %s",strerror(errno)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + } + + /* For repeat connection */ + hi_free(c->saddr); + c->saddr = hi_malloc(p->ai_addrlen); + if (c->saddr == NULL) + goto oom; + + memcpy(c->saddr, p->ai_addr, p->ai_addrlen); + c->addrlen = p->ai_addrlen; + + if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { + if (errno == EHOSTUNREACH) { + redisNetClose(c); + continue; + } else if (errno == EINPROGRESS) { + if (blocking) { + goto wait_for_ready; + } + /* This is ok. 
+ * Note that even when it's in blocking mode, we unset blocking + * for `connect()` + */ + } else if (errno == EADDRNOTAVAIL && reuseaddr) { + if (++reuses >= REDIS_CONNECT_RETRIES) { + goto error; + } else { + redisNetClose(c); + goto addrretry; + } + } else { + wait_for_ready: + if (redisContextWaitReady(c,timeout_msec) != REDIS_OK) + goto error; + if (redisSetTcpNoDelay(c) != REDIS_OK) + goto error; + } + } + if (blocking && redisSetBlocking(c,1) != REDIS_OK) + goto error; + + c->flags |= REDIS_CONNECTED; + rv = REDIS_OK; + goto end; + } + if (p == NULL) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't create socket: %s",strerror(errno)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); +error: + rv = REDIS_ERR; +end: + if(servinfo) { + freeaddrinfo(servinfo); + } + + return rv; // Need to return REDIS_OK if alright +} + +int redisContextConnectTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout) { + return _redisContextConnectTcp(c, addr, port, timeout, NULL); +} + +int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr) { + return _redisContextConnectTcp(c, addr, port, timeout, source_addr); +} + +int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout) { +#ifndef _WIN32 + int blocking = (c->flags & REDIS_BLOCK); + struct sockaddr_un *sa; + long timeout_msec = -1; + + if (redisCreateSocket(c,AF_UNIX) < 0) + return REDIS_ERR; + if (redisSetBlocking(c,0) != REDIS_OK) + return REDIS_ERR; + + c->connection_type = REDIS_CONN_UNIX; + if (c->unix_sock.path != path) { + hi_free(c->unix_sock.path); + + c->unix_sock.path = hi_strdup(path); + if (c->unix_sock.path == NULL) + goto oom; + } + + if (timeout) { + if (redisContextUpdateConnectTimeout(c, timeout) == REDIS_ERR) + goto oom; + } else { + hi_free(c->connect_timeout); + c->connect_timeout = NULL; + } + + if (redisContextTimeoutMsec(c,&timeout_msec) != REDIS_OK) + return REDIS_ERR; + + /* Don't leak sockaddr if we're reconnecting */ + if (c->saddr) hi_free(c->saddr); + + sa = (struct sockaddr_un*)(c->saddr = hi_malloc(sizeof(struct sockaddr_un))); + if (sa == NULL) + goto oom; + + c->addrlen = sizeof(struct sockaddr_un); + sa->sun_family = AF_UNIX; + strncpy(sa->sun_path, path, sizeof(sa->sun_path) - 1); + if (connect(c->fd, (struct sockaddr*)sa, sizeof(*sa)) == -1) { + if (errno == EINPROGRESS && !blocking) { + /* This is ok. */ + } else { + if (redisContextWaitReady(c,timeout_msec) != REDIS_OK) + return REDIS_ERR; + } + } + + /* Reset socket to be blocking after connect(2). */ + if (blocking && redisSetBlocking(c,1) != REDIS_OK) + return REDIS_ERR; + + c->flags |= REDIS_CONNECTED; + return REDIS_OK; +#else + /* We currently do not support Unix sockets for Windows. */ + /* TODO(m): https://devblogs.microsoft.com/commandline/af_unix-comes-to-windows/ */ + errno = EPROTONOSUPPORT; + return REDIS_ERR; +#endif /* _WIN32 */ +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} diff --git a/deps/hiredis/net.h b/deps/hiredis/net.h new file mode 100644 index 0000000..9f43283 --- /dev/null +++ b/deps/hiredis/net.h @@ -0,0 +1,56 @@ +/* Extracted from anet.c to work properly with Hiredis error reporting. + * + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __NET_H +#define __NET_H + +#include "hiredis.h" + +void redisNetClose(redisContext *c); +ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap); +ssize_t redisNetWrite(redisContext *c); + +int redisCheckSocketError(redisContext *c); +int redisContextSetTimeout(redisContext *c, const struct timeval tv); +int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout); +int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr); +int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout); +int redisKeepAlive(redisContext *c, int interval); +int redisCheckConnectDone(redisContext *c, int *completed); + +int redisSetTcpNoDelay(redisContext *c); + +#endif diff --git a/deps/hiredis/read.c b/deps/hiredis/read.c new file mode 100644 index 0000000..6c19c5a --- /dev/null +++ b/deps/hiredis/read.c @@ -0,0 +1,785 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#ifndef _MSC_VER +#include +#include +#endif +#include +#include +#include +#include +#include + +#include "alloc.h" +#include "read.h" +#include "sds.h" +#include "win32.h" + +/* Initial size of our nested reply stack and how much we grow it when needd */ +#define REDIS_READER_STACK_SIZE 9 + +static void __redisReaderSetError(redisReader *r, int type, const char *str) { + size_t len; + + if (r->reply != NULL && r->fn && r->fn->freeObject) { + r->fn->freeObject(r->reply); + r->reply = NULL; + } + + /* Clear input buffer on errors. */ + hi_sdsfree(r->buf); + r->buf = NULL; + r->pos = r->len = 0; + + /* Reset task stack. */ + r->ridx = -1; + + /* Set error. */ + r->err = type; + len = strlen(str); + len = len < (sizeof(r->errstr)-1) ? len : (sizeof(r->errstr)-1); + memcpy(r->errstr,str,len); + r->errstr[len] = '\0'; +} + +static size_t chrtos(char *buf, size_t size, char byte) { + size_t len = 0; + + switch(byte) { + case '\\': + case '"': + len = snprintf(buf,size,"\"\\%c\"",byte); + break; + case '\n': len = snprintf(buf,size,"\"\\n\""); break; + case '\r': len = snprintf(buf,size,"\"\\r\""); break; + case '\t': len = snprintf(buf,size,"\"\\t\""); break; + case '\a': len = snprintf(buf,size,"\"\\a\""); break; + case '\b': len = snprintf(buf,size,"\"\\b\""); break; + default: + if (isprint(byte)) + len = snprintf(buf,size,"\"%c\"",byte); + else + len = snprintf(buf,size,"\"\\x%02x\"",(unsigned char)byte); + break; + } + + return len; +} + +static void __redisReaderSetErrorProtocolByte(redisReader *r, char byte) { + char cbuf[8], sbuf[128]; + + chrtos(cbuf,sizeof(cbuf),byte); + snprintf(sbuf,sizeof(sbuf), + "Protocol error, got %s as reply type byte", cbuf); + __redisReaderSetError(r,REDIS_ERR_PROTOCOL,sbuf); +} + +static void __redisReaderSetErrorOOM(redisReader *r) { + __redisReaderSetError(r,REDIS_ERR_OOM,"Out of memory"); +} + +static char *readBytes(redisReader *r, unsigned int bytes) { + char *p; + if (r->len-r->pos >= bytes) { + p = r->buf+r->pos; + r->pos += bytes; + return p; + } + return NULL; +} + +/* Find pointer to \r\n. */ +static char *seekNewline(char *s, size_t len) { + char *ret; + + /* We cannot match with fewer than 2 bytes */ + if (len < 2) + return NULL; + + /* Search up to len - 1 characters */ + len--; + + /* Look for the \r */ + while ((ret = memchr(s, '\r', len)) != NULL) { + if (ret[1] == '\n') { + /* Found. */ + break; + } + /* Continue searching. */ + ret++; + len -= ret - s; + s = ret; + } + + return ret; +} + +/* Convert a string into a long long. Returns REDIS_OK if the string could be + * parsed into a (non-overflowing) long long, REDIS_ERR otherwise. The value + * will be set to the parsed value when appropriate. 
+ * + * Note that this function demands that the string strictly represents + * a long long: no spaces or other characters before or after the string + * representing the number are accepted, nor zeroes at the start if not + * for the string "0" representing the zero number. + * + * Because of its strictness, it is safe to use this function to check if + * you can convert a string into a long long, and obtain back the string + * from the number without any loss in the string representation. */ +static int string2ll(const char *s, size_t slen, long long *value) { + const char *p = s; + size_t plen = 0; + int negative = 0; + unsigned long long v; + + if (plen == slen) + return REDIS_ERR; + + /* Special case: first and only digit is 0. */ + if (slen == 1 && p[0] == '0') { + if (value != NULL) *value = 0; + return REDIS_OK; + } + + if (p[0] == '-') { + negative = 1; + p++; plen++; + + /* Abort on only a negative sign. */ + if (plen == slen) + return REDIS_ERR; + } + + /* First digit should be 1-9, otherwise the string should just be 0. */ + if (p[0] >= '1' && p[0] <= '9') { + v = p[0]-'0'; + p++; plen++; + } else if (p[0] == '0' && slen == 1) { + *value = 0; + return REDIS_OK; + } else { + return REDIS_ERR; + } + + while (plen < slen && p[0] >= '0' && p[0] <= '9') { + if (v > (ULLONG_MAX / 10)) /* Overflow. */ + return REDIS_ERR; + v *= 10; + + if (v > (ULLONG_MAX - (p[0]-'0'))) /* Overflow. */ + return REDIS_ERR; + v += p[0]-'0'; + + p++; plen++; + } + + /* Return if not all bytes were used. */ + if (plen < slen) + return REDIS_ERR; + + if (negative) { + if (v > ((unsigned long long)(-(LLONG_MIN+1))+1)) /* Overflow. */ + return REDIS_ERR; + if (value != NULL) *value = -v; + } else { + if (v > LLONG_MAX) /* Overflow. */ + return REDIS_ERR; + if (value != NULL) *value = v; + } + return REDIS_OK; +} + +static char *readLine(redisReader *r, int *_len) { + char *p, *s; + int len; + + p = r->buf+r->pos; + s = seekNewline(p,(r->len-r->pos)); + if (s != NULL) { + len = s-(r->buf+r->pos); + r->pos += len+2; /* skip \r\n */ + if (_len) *_len = len; + return p; + } + return NULL; +} + +static void moveToNextTask(redisReader *r) { + redisReadTask *cur, *prv; + while (r->ridx >= 0) { + /* Return a.s.a.p. when the stack is now empty. 
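+ *
+ * (Editor's note) r->task[] behaves as a stack of nested aggregate
+ * contexts: r->ridx is the current nesting depth, and each level tracks
+ * how many child elements are still expected before control returns to
+ * its parent task.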
*/ + if (r->ridx == 0) { + r->ridx--; + return; + } + + cur = r->task[r->ridx]; + prv = r->task[r->ridx-1]; + assert(prv->type == REDIS_REPLY_ARRAY || + prv->type == REDIS_REPLY_MAP || + prv->type == REDIS_REPLY_SET || + prv->type == REDIS_REPLY_PUSH); + if (cur->idx == prv->elements-1) { + r->ridx--; + } else { + /* Reset the type because the next item can be anything */ + assert(cur->idx < prv->elements); + cur->type = -1; + cur->elements = -1; + cur->idx++; + return; + } + } +} + +static int processLineItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj; + char *p; + int len; + + if ((p = readLine(r,&len)) != NULL) { + if (cur->type == REDIS_REPLY_INTEGER) { + long long v; + + if (string2ll(p, len, &v) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad integer value"); + return REDIS_ERR; + } + + if (r->fn && r->fn->createInteger) { + obj = r->fn->createInteger(cur,v); + } else { + obj = (void*)REDIS_REPLY_INTEGER; + } + } else if (cur->type == REDIS_REPLY_DOUBLE) { + char buf[326], *eptr; + double d; + + if ((size_t)len >= sizeof(buf)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Double value is too large"); + return REDIS_ERR; + } + + memcpy(buf,p,len); + buf[len] = '\0'; + + if (len == 3 && strcasecmp(buf,"inf") == 0) { + d = INFINITY; /* Positive infinite. */ + } else if (len == 4 && strcasecmp(buf,"-inf") == 0) { + d = -INFINITY; /* Negative infinite. */ + } else { + d = strtod((char*)buf,&eptr); + /* RESP3 only allows "inf", "-inf", and finite values, while + * strtod() allows other variations on infinity, NaN, + * etc. We explicity handle our two allowed infinite cases + * above, so strtod() should only result in finite values. */ + if (buf[0] == '\0' || eptr != &buf[len] || !isfinite(d)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad double value"); + return REDIS_ERR; + } + } + + if (r->fn && r->fn->createDouble) { + obj = r->fn->createDouble(cur,d,buf,len); + } else { + obj = (void*)REDIS_REPLY_DOUBLE; + } + } else if (cur->type == REDIS_REPLY_NIL) { + if (len != 0) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad nil value"); + return REDIS_ERR; + } + + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + } else if (cur->type == REDIS_REPLY_BOOL) { + int bval; + + if (len != 1 || !strchr("tTfF", p[0])) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad bool value"); + return REDIS_ERR; + } + + bval = p[0] == 't' || p[0] == 'T'; + if (r->fn && r->fn->createBool) + obj = r->fn->createBool(cur,bval); + else + obj = (void*)REDIS_REPLY_BOOL; + } else if (cur->type == REDIS_REPLY_BIGNUM) { + /* Ensure all characters are decimal digits (with possible leading + * minus sign). */ + for (int i = 0; i < len; i++) { + /* XXX Consider: Allow leading '+'? Error on leading '0's? */ + if (i == 0 && p[0] == '-') continue; + if (p[i] < '0' || p[i] > '9') { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad bignum value"); + return REDIS_ERR; + } + } + if (r->fn && r->fn->createString) + obj = r->fn->createString(cur,p,len); + else + obj = (void*)REDIS_REPLY_BIGNUM; + } else { + /* Type will be error or status. 
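+ *
+ * (Editor's illustration) e.g. the wire reply "+OK\r\n" reaches this
+ * branch as a REDIS_REPLY_STATUS with payload "OK"; the loop below
+ * rejects payloads that themselves contain CR or LF bytes.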
*/ + for (int i = 0; i < len; i++) { + if (p[i] == '\r' || p[i] == '\n') { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad simple string value"); + return REDIS_ERR; + } + } + if (r->fn && r->fn->createString) + obj = r->fn->createString(cur,p,len); + else + obj = (void*)(size_t)(cur->type); + } + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + /* Set reply if this is the root object. */ + if (r->ridx == 0) r->reply = obj; + moveToNextTask(r); + return REDIS_OK; + } + + return REDIS_ERR; +} + +static int processBulkItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj = NULL; + char *p, *s; + long long len; + unsigned long bytelen; + int success = 0; + + p = r->buf+r->pos; + s = seekNewline(p,r->len-r->pos); + if (s != NULL) { + p = r->buf+r->pos; + bytelen = s-(r->buf+r->pos)+2; /* include \r\n */ + + if (string2ll(p, bytelen - 2, &len) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad bulk string length"); + return REDIS_ERR; + } + + if (len < -1 || (LLONG_MAX > SIZE_MAX && len > (long long)SIZE_MAX)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bulk string length out of range"); + return REDIS_ERR; + } + + if (len == -1) { + /* The nil object can always be created. */ + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + success = 1; + } else { + /* Only continue when the buffer contains the entire bulk item. */ + bytelen += len+2; /* include \r\n */ + if (r->pos+bytelen <= r->len) { + if ((cur->type == REDIS_REPLY_VERB && len < 4) || + (cur->type == REDIS_REPLY_VERB && s[5] != ':')) + { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Verbatim string 4 bytes of content type are " + "missing or incorrectly encoded."); + return REDIS_ERR; + } + if (r->fn && r->fn->createString) + obj = r->fn->createString(cur,s+2,len); + else + obj = (void*)(long)cur->type; + success = 1; + } + } + + /* Proceed when obj was created. */ + if (success) { + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + r->pos += bytelen; + + /* Set reply if this is the root object. */ + if (r->ridx == 0) r->reply = obj; + moveToNextTask(r); + return REDIS_OK; + } + } + + return REDIS_ERR; +} + +static int redisReaderGrow(redisReader *r) { + redisReadTask **aux; + int newlen; + + /* Grow our stack size */ + newlen = r->tasks + REDIS_READER_STACK_SIZE; + aux = hi_realloc(r->task, sizeof(*r->task) * newlen); + if (aux == NULL) + goto oom; + + r->task = aux; + + /* Allocate new tasks */ + for (; r->tasks < newlen; r->tasks++) { + r->task[r->tasks] = hi_calloc(1, sizeof(**r->task)); + if (r->task[r->tasks] == NULL) + goto oom; + } + + return REDIS_OK; +oom: + __redisReaderSetErrorOOM(r); + return REDIS_ERR; +} + +/* Process the array, map and set types. 
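+ *
+ * (Editor's illustration) a frame such as "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
+ * announces a two element array; for RESP3 maps the announced count is
+ * doubled below because keys and values arrive interleaved.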
*/ +static int processAggregateItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj; + char *p; + long long elements; + int root = 0, len; + + if (r->ridx == r->tasks - 1) { + if (redisReaderGrow(r) == REDIS_ERR) + return REDIS_ERR; + } + + if ((p = readLine(r,&len)) != NULL) { + if (string2ll(p, len, &elements) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad multi-bulk length"); + return REDIS_ERR; + } + + root = (r->ridx == 0); + + if (elements < -1 || (LLONG_MAX > SIZE_MAX && elements > SIZE_MAX) || + (r->maxelements > 0 && elements > r->maxelements)) + { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Multi-bulk length out of range"); + return REDIS_ERR; + } + + if (elements == -1) { + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + moveToNextTask(r); + } else { + if (cur->type == REDIS_REPLY_MAP) elements *= 2; + + if (r->fn && r->fn->createArray) + obj = r->fn->createArray(cur,elements); + else + obj = (void*)(long)cur->type; + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + /* Modify task stack when there are more than 0 elements. */ + if (elements > 0) { + cur->elements = elements; + cur->obj = obj; + r->ridx++; + r->task[r->ridx]->type = -1; + r->task[r->ridx]->elements = -1; + r->task[r->ridx]->idx = 0; + r->task[r->ridx]->obj = NULL; + r->task[r->ridx]->parent = cur; + r->task[r->ridx]->privdata = r->privdata; + } else { + moveToNextTask(r); + } + } + + /* Set reply if this is the root object. */ + if (root) r->reply = obj; + return REDIS_OK; + } + + return REDIS_ERR; +} + +static int processItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + char *p; + + /* check if we need to read type */ + if (cur->type < 0) { + if ((p = readBytes(r,1)) != NULL) { + switch (p[0]) { + case '-': + cur->type = REDIS_REPLY_ERROR; + break; + case '+': + cur->type = REDIS_REPLY_STATUS; + break; + case ':': + cur->type = REDIS_REPLY_INTEGER; + break; + case ',': + cur->type = REDIS_REPLY_DOUBLE; + break; + case '_': + cur->type = REDIS_REPLY_NIL; + break; + case '$': + cur->type = REDIS_REPLY_STRING; + break; + case '*': + cur->type = REDIS_REPLY_ARRAY; + break; + case '%': + cur->type = REDIS_REPLY_MAP; + break; + case '~': + cur->type = REDIS_REPLY_SET; + break; + case '#': + cur->type = REDIS_REPLY_BOOL; + break; + case '=': + cur->type = REDIS_REPLY_VERB; + break; + case '>': + cur->type = REDIS_REPLY_PUSH; + break; + case '(': + cur->type = REDIS_REPLY_BIGNUM; + break; + default: + __redisReaderSetErrorProtocolByte(r,*p); + return REDIS_ERR; + } + } else { + /* could not consume 1 byte */ + return REDIS_ERR; + } + } + + /* process typed item */ + switch(cur->type) { + case REDIS_REPLY_ERROR: + case REDIS_REPLY_STATUS: + case REDIS_REPLY_INTEGER: + case REDIS_REPLY_DOUBLE: + case REDIS_REPLY_NIL: + case REDIS_REPLY_BOOL: + case REDIS_REPLY_BIGNUM: + return processLineItem(r); + case REDIS_REPLY_STRING: + case REDIS_REPLY_VERB: + return processBulkItem(r); + case REDIS_REPLY_ARRAY: + case REDIS_REPLY_MAP: + case REDIS_REPLY_SET: + case REDIS_REPLY_PUSH: + return processAggregateItem(r); + default: + assert(NULL); + return REDIS_ERR; /* Avoid warning. 
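+ *
+ * (Editor's note) The functions below make up the public reader API:
+ * create a reader, feed it raw protocol bytes, then poll it for complete
+ * replies. A minimal hedged sketch (variable names are illustrative; a
+ * NULL function table makes the reader hand back numeric type tags
+ * rather than real reply objects):
+ *
+ *     redisReader *r = redisReaderCreateWithFunctions(NULL);
+ *     void *reply = NULL;
+ *     redisReaderFeed(r, "+OK\r\n", 5);
+ *     redisReaderGetReply(r, &reply);  // reply == (void*)REDIS_REPLY_STATUS
+ *     redisReaderFree(r);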
*/ + } +} + +redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn) { + redisReader *r; + + r = hi_calloc(1,sizeof(redisReader)); + if (r == NULL) + return NULL; + + r->buf = hi_sdsempty(); + if (r->buf == NULL) + goto oom; + + r->task = hi_calloc(REDIS_READER_STACK_SIZE, sizeof(*r->task)); + if (r->task == NULL) + goto oom; + + for (; r->tasks < REDIS_READER_STACK_SIZE; r->tasks++) { + r->task[r->tasks] = hi_calloc(1, sizeof(**r->task)); + if (r->task[r->tasks] == NULL) + goto oom; + } + + r->fn = fn; + r->maxbuf = REDIS_READER_MAX_BUF; + r->maxelements = REDIS_READER_MAX_ARRAY_ELEMENTS; + r->ridx = -1; + + return r; +oom: + redisReaderFree(r); + return NULL; +} + +void redisReaderFree(redisReader *r) { + if (r == NULL) + return; + + if (r->reply != NULL && r->fn && r->fn->freeObject) + r->fn->freeObject(r->reply); + + if (r->task) { + /* We know r->task[i] is allocated if i < r->tasks */ + for (int i = 0; i < r->tasks; i++) { + hi_free(r->task[i]); + } + + hi_free(r->task); + } + + hi_sdsfree(r->buf); + hi_free(r); +} + +int redisReaderFeed(redisReader *r, const char *buf, size_t len) { + hisds newbuf; + + /* Return early when this reader is in an erroneous state. */ + if (r->err) + return REDIS_ERR; + + /* Copy the provided buffer. */ + if (buf != NULL && len >= 1) { + /* Destroy internal buffer when it is empty and is quite large. */ + if (r->len == 0 && r->maxbuf != 0 && hi_sdsavail(r->buf) > r->maxbuf) { + hi_sdsfree(r->buf); + r->buf = hi_sdsempty(); + if (r->buf == 0) goto oom; + + r->pos = 0; + } + + newbuf = hi_sdscatlen(r->buf,buf,len); + if (newbuf == NULL) goto oom; + + r->buf = newbuf; + r->len = hi_sdslen(r->buf); + } + + return REDIS_OK; +oom: + __redisReaderSetErrorOOM(r); + return REDIS_ERR; +} + +int redisReaderGetReply(redisReader *r, void **reply) { + /* Default target pointer to NULL. */ + if (reply != NULL) + *reply = NULL; + + /* Return early when this reader is in an erroneous state. */ + if (r->err) + return REDIS_ERR; + + /* When the buffer is empty, there will never be a reply. */ + if (r->len == 0) + return REDIS_OK; + + /* Set first item to process when the stack is empty. */ + if (r->ridx == -1) { + r->task[0]->type = -1; + r->task[0]->elements = -1; + r->task[0]->idx = -1; + r->task[0]->obj = NULL; + r->task[0]->parent = NULL; + r->task[0]->privdata = r->privdata; + r->ridx = 0; + } + + /* Process items in reply. */ + while (r->ridx >= 0) + if (processItem(r) != REDIS_OK) + break; + + /* Return ASAP when an error occurred. */ + if (r->err) + return REDIS_ERR; + + /* Discard part of the buffer when we've consumed at least 1k, to avoid + * doing unnecessary calls to memmove() in sds.c. */ + if (r->pos >= 1024) { + if (hi_sdsrange(r->buf,r->pos,-1) < 0) return REDIS_ERR; + r->pos = 0; + r->len = hi_sdslen(r->buf); + } + + /* Emit a reply when there is one. */ + if (r->ridx == -1) { + if (reply != NULL) { + *reply = r->reply; + } else if (r->reply != NULL && r->fn && r->fn->freeObject) { + r->fn->freeObject(r->reply); + } + r->reply = NULL; + } + return REDIS_OK; +} diff --git a/deps/hiredis/read.h b/deps/hiredis/read.h new file mode 100644 index 0000000..2d74d77 --- /dev/null +++ b/deps/hiredis/read.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef __HIREDIS_READ_H +#define __HIREDIS_READ_H +#include /* for size_t */ + +#define REDIS_ERR -1 +#define REDIS_OK 0 + +/* When an error occurs, the err flag in a context is set to hold the type of + * error that occurred. REDIS_ERR_IO means there was an I/O error and you + * should use the "errno" variable to find out what is wrong. + * For other values, the "errstr" field will hold a description. */ +#define REDIS_ERR_IO 1 /* Error in read or write */ +#define REDIS_ERR_EOF 3 /* End of file */ +#define REDIS_ERR_PROTOCOL 4 /* Protocol error */ +#define REDIS_ERR_OOM 5 /* Out of memory */ +#define REDIS_ERR_TIMEOUT 6 /* Timed out */ +#define REDIS_ERR_OTHER 2 /* Everything else... */ + +#define REDIS_REPLY_STRING 1 +#define REDIS_REPLY_ARRAY 2 +#define REDIS_REPLY_INTEGER 3 +#define REDIS_REPLY_NIL 4 +#define REDIS_REPLY_STATUS 5 +#define REDIS_REPLY_ERROR 6 +#define REDIS_REPLY_DOUBLE 7 +#define REDIS_REPLY_BOOL 8 +#define REDIS_REPLY_MAP 9 +#define REDIS_REPLY_SET 10 +#define REDIS_REPLY_ATTR 11 +#define REDIS_REPLY_PUSH 12 +#define REDIS_REPLY_BIGNUM 13 +#define REDIS_REPLY_VERB 14 + +/* Default max unused reader buffer. 
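+ *
+ * (Editor's note) When the reader's input buffer is empty but its spare
+ * capacity exceeds this limit, redisReaderFeed() frees it and starts
+ * again from an empty buffer.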
*/ +#define REDIS_READER_MAX_BUF (1024*16) + +/* Default multi-bulk element limit */ +#define REDIS_READER_MAX_ARRAY_ELEMENTS ((1LL<<32) - 1) + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct redisReadTask { + int type; + long long elements; /* number of elements in multibulk container */ + int idx; /* index in parent (array) object */ + void *obj; /* holds user-generated value for a read task */ + struct redisReadTask *parent; /* parent task */ + void *privdata; /* user-settable arbitrary field */ +} redisReadTask; + +typedef struct redisReplyObjectFunctions { + void *(*createString)(const redisReadTask*, char*, size_t); + void *(*createArray)(const redisReadTask*, size_t); + void *(*createInteger)(const redisReadTask*, long long); + void *(*createDouble)(const redisReadTask*, double, char*, size_t); + void *(*createNil)(const redisReadTask*); + void *(*createBool)(const redisReadTask*, int); + void (*freeObject)(void*); +} redisReplyObjectFunctions; + +typedef struct redisReader { + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + + char *buf; /* Read buffer */ + size_t pos; /* Buffer cursor */ + size_t len; /* Buffer length */ + size_t maxbuf; /* Max length of unused buffer */ + long long maxelements; /* Max multi-bulk elements */ + + redisReadTask **task; + int tasks; + + int ridx; /* Index of current read task */ + void *reply; /* Temporary reply pointer */ + + redisReplyObjectFunctions *fn; + void *privdata; +} redisReader; + +/* Public API for the protocol parser. */ +redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn); +void redisReaderFree(redisReader *r); +int redisReaderFeed(redisReader *r, const char *buf, size_t len); +int redisReaderGetReply(redisReader *r, void **reply); + +#define redisReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p)) +#define redisReaderGetObject(_r) (((redisReader*)(_r))->reply) +#define redisReaderGetError(_r) (((redisReader*)(_r))->errstr) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/deps/hiredis/sds.c b/deps/hiredis/sds.c new file mode 100644 index 0000000..114fa49 --- /dev/null +++ b/deps/hiredis/sds.c @@ -0,0 +1,1289 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#include +#include +#include +#include +#include "sds.h" +#include "sdsalloc.h" + +static inline int hi_sdsHdrSize(char type) { + switch(type&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + return sizeof(struct hisdshdr5); + case HI_SDS_TYPE_8: + return sizeof(struct hisdshdr8); + case HI_SDS_TYPE_16: + return sizeof(struct hisdshdr16); + case HI_SDS_TYPE_32: + return sizeof(struct hisdshdr32); + case HI_SDS_TYPE_64: + return sizeof(struct hisdshdr64); + } + return 0; +} + +static inline char hi_sdsReqType(size_t string_size) { + if (string_size < 32) + return HI_SDS_TYPE_5; + if (string_size < 0xff) + return HI_SDS_TYPE_8; + if (string_size < 0xffff) + return HI_SDS_TYPE_16; + if (string_size < 0xffffffff) + return HI_SDS_TYPE_32; + return HI_SDS_TYPE_64; +} + +/* Create a new hisds string with the content specified by the 'init' pointer + * and 'initlen'. + * If NULL is used for 'init' the string is initialized with zero bytes. + * + * The string is always null-terminated (all the hisds strings are, always) so + * even if you create an hisds string with: + * + * mystring = hi_sdsnewlen("abc",3); + * + * You can print the string with printf() as there is an implicit \0 at the + * end of the string. However the string is binary safe and can contain + * \0 characters in the middle, as the length is stored in the hisds header. */ +hisds hi_sdsnewlen(const void *init, size_t initlen) { + void *sh; + hisds s; + char type = hi_sdsReqType(initlen); + /* Empty strings are usually created in order to append. Use type 8 + * since type 5 is not good at this. */ + if (type == HI_SDS_TYPE_5 && initlen == 0) type = HI_SDS_TYPE_8; + int hdrlen = hi_sdsHdrSize(type); + unsigned char *fp; /* flags pointer. */ + + sh = hi_s_malloc(hdrlen+initlen+1); + if (sh == NULL) return NULL; + if (!init) + memset(sh, 0, hdrlen+initlen+1); + s = (char*)sh+hdrlen; + fp = ((unsigned char*)s)-1; + switch(type) { + case HI_SDS_TYPE_5: { + *fp = type | (initlen << HI_SDS_TYPE_BITS); + break; + } + case HI_SDS_TYPE_8: { + HI_SDS_HDR_VAR(8,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case HI_SDS_TYPE_16: { + HI_SDS_HDR_VAR(16,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case HI_SDS_TYPE_32: { + HI_SDS_HDR_VAR(32,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case HI_SDS_TYPE_64: { + HI_SDS_HDR_VAR(64,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + } + if (initlen && init) + memcpy(s, init, initlen); + s[initlen] = '\0'; + return s; +} + +/* Create an empty (zero length) hisds string. Even in this case the string + * always has an implicit null term. */ +hisds hi_sdsempty(void) { + return hi_sdsnewlen("",0); +} + +/* Create a new hisds string starting from a null terminated C string. */ +hisds hi_sdsnew(const char *init) { + size_t initlen = (init == NULL) ? 
0 : strlen(init); + return hi_sdsnewlen(init, initlen); +} + +/* Duplicate an hisds string. */ +hisds hi_sdsdup(const hisds s) { + return hi_sdsnewlen(s, hi_sdslen(s)); +} + +/* Free an hisds string. No operation is performed if 's' is NULL. */ +void hi_sdsfree(hisds s) { + if (s == NULL) return; + hi_s_free((char*)s-hi_sdsHdrSize(s[-1])); +} + +/* Set the hisds string length to the length as obtained with strlen(), so + * considering as content only up to the first null term character. + * + * This function is useful when the hisds string is hacked manually in some + * way, like in the following example: + * + * s = hi_sdsnew("foobar"); + * s[2] = '\0'; + * hi_sdsupdatelen(s); + * printf("%d\n", hi_sdslen(s)); + * + * The output will be "2", but if we comment out the call to hi_sdsupdatelen() + * the output will be "6" as the string was modified but the logical length + * remains 6 bytes. */ +void hi_sdsupdatelen(hisds s) { + int reallen = strlen(s); + hi_sdssetlen(s, reallen); +} + +/* Modify an hisds string in-place to make it empty (zero length). + * However all the existing buffer is not discarded but set as free space + * so that next append operations will not require allocations up to the + * number of bytes previously available. */ +void hi_sdsclear(hisds s) { + hi_sdssetlen(s, 0); + s[0] = '\0'; +} + +/* Enlarge the free space at the end of the hisds string so that the caller + * is sure that after calling this function can overwrite up to addlen + * bytes after the end of the string, plus one more byte for nul term. + * + * Note: this does not change the *length* of the hisds string as returned + * by hi_sdslen(), but only the free buffer space we have. */ +hisds hi_sdsMakeRoomFor(hisds s, size_t addlen) { + void *sh, *newsh; + size_t avail = hi_sdsavail(s); + size_t len, newlen; + char type, oldtype = s[-1] & HI_SDS_TYPE_MASK; + int hdrlen; + + /* Return ASAP if there is enough space left. */ + if (avail >= addlen) return s; + + len = hi_sdslen(s); + sh = (char*)s-hi_sdsHdrSize(oldtype); + newlen = (len+addlen); + if (newlen < HI_SDS_MAX_PREALLOC) + newlen *= 2; + else + newlen += HI_SDS_MAX_PREALLOC; + + type = hi_sdsReqType(newlen); + + /* Don't use type 5: the user is appending to the string and type 5 is + * not able to remember empty space, so hi_sdsMakeRoomFor() must be called + * at every appending operation. */ + if (type == HI_SDS_TYPE_5) type = HI_SDS_TYPE_8; + + hdrlen = hi_sdsHdrSize(type); + if (oldtype==type) { + newsh = hi_s_realloc(sh, hdrlen+newlen+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + /* Since the header size changes, need to move the string forward, + * and can't use realloc */ + newsh = hi_s_malloc(hdrlen+newlen+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + hi_s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + hi_sdssetlen(s, len); + } + hi_sdssetalloc(s, newlen); + return s; +} + +/* Reallocate the hisds string so that it has no free space at the end. The + * contained string remains not altered, but next concatenation operations + * will require a reallocation. + * + * After the call, the passed hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. 
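+ *
+ * Typical call pattern (editor's illustration):
+ *
+ *     s = hi_sdsRemoveFreeSpace(s);
+ *     if (s == NULL) { ... handle the allocation failure ... }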
*/ +hisds hi_sdsRemoveFreeSpace(hisds s) { + void *sh, *newsh; + char type, oldtype = s[-1] & HI_SDS_TYPE_MASK; + int hdrlen; + size_t len = hi_sdslen(s); + sh = (char*)s-hi_sdsHdrSize(oldtype); + + type = hi_sdsReqType(len); + hdrlen = hi_sdsHdrSize(type); + if (oldtype==type) { + newsh = hi_s_realloc(sh, hdrlen+len+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + newsh = hi_s_malloc(hdrlen+len+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + hi_s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + hi_sdssetlen(s, len); + } + hi_sdssetalloc(s, len); + return s; +} + +/* Return the total size of the allocation of the specifed hisds string, + * including: + * 1) The hisds header before the pointer. + * 2) The string. + * 3) The free buffer at the end if any. + * 4) The implicit null term. + */ +size_t hi_sdsAllocSize(hisds s) { + size_t alloc = hi_sdsalloc(s); + return hi_sdsHdrSize(s[-1])+alloc+1; +} + +/* Return the pointer of the actual SDS allocation (normally SDS strings + * are referenced by the start of the string buffer). */ +void *hi_sdsAllocPtr(hisds s) { + return (void*) (s-hi_sdsHdrSize(s[-1])); +} + +/* Increment the hisds length and decrements the left free space at the + * end of the string according to 'incr'. Also set the null term + * in the new end of the string. + * + * This function is used in order to fix the string length after the + * user calls hi_sdsMakeRoomFor(), writes something after the end of + * the current string, and finally needs to set the new length. + * + * Note: it is possible to use a negative increment in order to + * right-trim the string. + * + * Usage example: + * + * Using hi_sdsIncrLen() and hi_sdsMakeRoomFor() it is possible to mount the + * following schema, to cat bytes coming from the kernel to the end of an + * hisds string without copying into an intermediate buffer: + * + * oldlen = hi_hi_sdslen(s); + * s = hi_sdsMakeRoomFor(s, BUFFER_SIZE); + * nread = read(fd, s+oldlen, BUFFER_SIZE); + * ... check for nread <= 0 and handle it ... + * hi_sdsIncrLen(s, nread); + */ +void hi_sdsIncrLen(hisds s, int incr) { + unsigned char flags = s[-1]; + size_t len; + switch(flags&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char oldlen = HI_SDS_TYPE_5_LEN(flags); + assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); + *fp = HI_SDS_TYPE_5 | ((oldlen+incr) << HI_SDS_TYPE_BITS); + len = oldlen+incr; + break; + } + case HI_SDS_TYPE_8: { + HI_SDS_HDR_VAR(8,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case HI_SDS_TYPE_16: { + HI_SDS_HDR_VAR(16,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case HI_SDS_TYPE_32: { + HI_SDS_HDR_VAR(32,s); + assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case HI_SDS_TYPE_64: { + HI_SDS_HDR_VAR(64,s); + assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); + len = (sh->len += incr); + break; + } + default: len = 0; /* Just to avoid compilation warnings. */ + } + s[len] = '\0'; +} + +/* Grow the hisds to have the specified length. Bytes that were not part of + * the original length of the hisds will be set to zero. 
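+ *
+ * Example (editor's illustration): starting from hi_sdsnew("ab"), calling
+ * hi_sdsgrowzero(s, 5) yields a string of length 5 whose last three bytes
+ * are zero.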
+ * + * if the specified length is smaller than the current length, no operation + * is performed. */ +hisds hi_sdsgrowzero(hisds s, size_t len) { + size_t curlen = hi_sdslen(s); + + if (len <= curlen) return s; + s = hi_sdsMakeRoomFor(s,len-curlen); + if (s == NULL) return NULL; + + /* Make sure added region doesn't contain garbage */ + memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ + hi_sdssetlen(s, len); + return s; +} + +/* Append the specified binary-safe string pointed by 't' of 'len' bytes to the + * end of the specified hisds string 's'. + * + * After the call, the passed hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +hisds hi_sdscatlen(hisds s, const void *t, size_t len) { + size_t curlen = hi_sdslen(s); + + s = hi_sdsMakeRoomFor(s,len); + if (s == NULL) return NULL; + memcpy(s+curlen, t, len); + hi_sdssetlen(s, curlen+len); + s[curlen+len] = '\0'; + return s; +} + +/* Append the specified null termianted C string to the hisds string 's'. + * + * After the call, the passed hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +hisds hi_sdscat(hisds s, const char *t) { + return hi_sdscatlen(s, t, strlen(t)); +} + +/* Append the specified hisds 't' to the existing hisds 's'. + * + * After the call, the modified hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +hisds hi_sdscatsds(hisds s, const hisds t) { + return hi_sdscatlen(s, t, hi_sdslen(t)); +} + +/* Destructively modify the hisds string 's' to hold the specified binary + * safe string pointed by 't' of length 'len' bytes. */ +hisds hi_sdscpylen(hisds s, const char *t, size_t len) { + if (hi_sdsalloc(s) < len) { + s = hi_sdsMakeRoomFor(s,len-hi_sdslen(s)); + if (s == NULL) return NULL; + } + memcpy(s, t, len); + s[len] = '\0'; + hi_sdssetlen(s, len); + return s; +} + +/* Like hi_sdscpylen() but 't' must be a null-terminated string so that the length + * of the string is obtained with strlen(). */ +hisds hi_sdscpy(hisds s, const char *t) { + return hi_sdscpylen(s, t, strlen(t)); +} + +/* Helper for hi_sdscatlonglong() doing the actual number -> string + * conversion. 's' must point to a string with room for at least + * HI_SDS_LLSTR_SIZE bytes. + * + * The function returns the length of the null-terminated string + * representation stored at 's'. */ +#define HI_SDS_LLSTR_SIZE 21 +int hi_sdsll2str(char *s, long long value) { + char *p, aux; + unsigned long long v; + size_t l; + + /* Generate the string representation, this method produces + * an reversed string. */ + v = (value < 0) ? -value : value; + p = s; + do { + *p++ = '0'+(v%10); + v /= 10; + } while(v); + if (value < 0) *p++ = '-'; + + /* Compute length and add null term. */ + l = p-s; + *p = '\0'; + + /* Reverse the string. */ + p--; + while(s < p) { + aux = *s; + *s = *p; + *p = aux; + s++; + p--; + } + return l; +} + +/* Identical hi_sdsll2str(), but for unsigned long long type. */ +int hi_sdsull2str(char *s, unsigned long long v) { + char *p, aux; + size_t l; + + /* Generate the string representation, this method produces + * an reversed string. */ + p = s; + do { + *p++ = '0'+(v%10); + v /= 10; + } while(v); + + /* Compute length and add null term. */ + l = p-s; + *p = '\0'; + + /* Reverse the string. 
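+ *
+ * (Editor's note) the digits were emitted least-significant first by the
+ * loop above, so an in-place reversal yields the final decimal text.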
*/ + p--; + while(s < p) { + aux = *s; + *s = *p; + *p = aux; + s++; + p--; + } + return l; +} + +/* Create an hisds string from a long long value. It is much faster than: + * + * hi_sdscatprintf(hi_sdsempty(),"%lld\n", value); + */ +hisds hi_sdsfromlonglong(long long value) { + char buf[HI_SDS_LLSTR_SIZE]; + int len = hi_sdsll2str(buf,value); + + return hi_sdsnewlen(buf,len); +} + +/* Like hi_sdscatprintf() but gets va_list instead of being variadic. */ +hisds hi_sdscatvprintf(hisds s, const char *fmt, va_list ap) { + va_list cpy; + char staticbuf[1024], *buf = staticbuf, *t; + size_t buflen = strlen(fmt)*2; + + /* We try to start using a static buffer for speed. + * If not possible we revert to heap allocation. */ + if (buflen > sizeof(staticbuf)) { + buf = hi_s_malloc(buflen); + if (buf == NULL) return NULL; + } else { + buflen = sizeof(staticbuf); + } + + /* Try with buffers two times bigger every time we fail to + * fit the string in the current buffer size. */ + while(1) { + buf[buflen-2] = '\0'; + va_copy(cpy,ap); + vsnprintf(buf, buflen, fmt, cpy); + va_end(cpy); + if (buf[buflen-2] != '\0') { + if (buf != staticbuf) hi_s_free(buf); + buflen *= 2; + buf = hi_s_malloc(buflen); + if (buf == NULL) return NULL; + continue; + } + break; + } + + /* Finally concat the obtained string to the SDS string and return it. */ + t = hi_sdscat(s, buf); + if (buf != staticbuf) hi_s_free(buf); + return t; +} + +/* Append to the hisds string 's' a string obtained using printf-alike format + * specifier. + * + * After the call, the modified hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. + * + * Example: + * + * s = hi_sdsnew("Sum is: "); + * s = hi_sdscatprintf(s,"%d+%d = %d",a,b,a+b). + * + * Often you need to create a string from scratch with the printf-alike + * format. When this is the need, just use hi_sdsempty() as the target string: + * + * s = hi_sdscatprintf(hi_sdsempty(), "... your format ...", args); + */ +hisds hi_sdscatprintf(hisds s, const char *fmt, ...) { + va_list ap; + char *t; + va_start(ap, fmt); + t = hi_sdscatvprintf(s,fmt,ap); + va_end(ap); + return t; +} + +/* This function is similar to hi_sdscatprintf, but much faster as it does + * not rely on sprintf() family functions implemented by the libc that + * are often very slow. Moreover directly handling the hisds string as + * new data is concatenated provides a performance improvement. + * + * However this function only handles an incompatible subset of printf-alike + * format specifiers: + * + * %s - C String + * %S - SDS string + * %i - signed int + * %I - 64 bit signed integer (long long, int64_t) + * %u - unsigned int + * %U - 64 bit unsigned integer (unsigned long long, uint64_t) + * %% - Verbatim "%" character. + */ +hisds hi_sdscatfmt(hisds s, char const *fmt, ...) { + const char *f = fmt; + int i; + va_list ap; + + va_start(ap,fmt); + i = hi_sdslen(s); /* Position of the next byte to write to dest str. */ + while(*f) { + char next, *str; + size_t l; + long long num; + unsigned long long unum; + + /* Make sure there is always space for at least 1 char. */ + if (hi_sdsavail(s)==0) { + s = hi_sdsMakeRoomFor(s,1); + if (s == NULL) goto fmt_error; + } + + switch(*f) { + case '%': + next = *(f+1); + f++; + switch(next) { + case 's': + case 'S': + str = va_arg(ap,char*); + l = (next == 's') ? 
strlen(str) : hi_sdslen(str); + if (hi_sdsavail(s) < l) { + s = hi_sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,str,l); + hi_sdsinclen(s,l); + i += l; + break; + case 'i': + case 'I': + if (next == 'i') + num = va_arg(ap,int); + else + num = va_arg(ap,long long); + { + char buf[HI_SDS_LLSTR_SIZE]; + l = hi_sdsll2str(buf,num); + if (hi_sdsavail(s) < l) { + s = hi_sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,buf,l); + hi_sdsinclen(s,l); + i += l; + } + break; + case 'u': + case 'U': + if (next == 'u') + unum = va_arg(ap,unsigned int); + else + unum = va_arg(ap,unsigned long long); + { + char buf[HI_SDS_LLSTR_SIZE]; + l = hi_sdsull2str(buf,unum); + if (hi_sdsavail(s) < l) { + s = hi_sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,buf,l); + hi_sdsinclen(s,l); + i += l; + } + break; + default: /* Handle %% and generally %. */ + s[i++] = next; + hi_sdsinclen(s,1); + break; + } + break; + default: + s[i++] = *f; + hi_sdsinclen(s,1); + break; + } + f++; + } + va_end(ap); + + /* Add null-term */ + s[i] = '\0'; + return s; + +fmt_error: + va_end(ap); + return NULL; +} + +/* Remove the part of the string from left and from right composed just of + * contiguous characters found in 'cset', that is a null terminted C string. + * + * After the call, the modified hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. + * + * Example: + * + * s = hi_sdsnew("AA...AA.a.aa.aHelloWorld :::"); + * s = hi_sdstrim(s,"Aa. :"); + * printf("%s\n", s); + * + * Output will be just "Hello World". + */ +hisds hi_sdstrim(hisds s, const char *cset) { + char *start, *end, *sp, *ep; + size_t len; + + sp = start = s; + ep = end = s+hi_sdslen(s)-1; + while(sp <= end && strchr(cset, *sp)) sp++; + while(ep > sp && strchr(cset, *ep)) ep--; + len = (sp > ep) ? 0 : ((ep-sp)+1); + if (s != sp) memmove(s, sp, len); + s[len] = '\0'; + hi_sdssetlen(s,len); + return s; +} + +/* Turn the string into a smaller (or equal) string containing only the + * substring specified by the 'start' and 'end' indexes. + * + * start and end can be negative, where -1 means the last character of the + * string, -2 the penultimate character, and so forth. + * + * The interval is inclusive, so the start and end characters will be part + * of the resulting string. + * + * The string is modified in-place. + * + * Return value: + * -1 (error) if hi_sdslen(s) is larger than maximum positive ssize_t value. + * 0 on success. + * + * Example: + * + * s = hi_sdsnew("Hello World"); + * hi_sdsrange(s,1,-1); => "ello World" + */ +int hi_sdsrange(hisds s, ssize_t start, ssize_t end) { + size_t newlen, len = hi_sdslen(s); + if (len > SSIZE_MAX) return -1; + + if (len == 0) return 0; + if (start < 0) { + start = len+start; + if (start < 0) start = 0; + } + if (end < 0) { + end = len+end; + if (end < 0) end = 0; + } + newlen = (start > end) ? 0 : (end-start)+1; + if (newlen != 0) { + if (start >= (ssize_t)len) { + newlen = 0; + } else if (end >= (ssize_t)len) { + end = len-1; + newlen = (start > end) ? 0 : (end-start)+1; + } + } else { + start = 0; + } + if (start && newlen) memmove(s, s+start, newlen); + s[newlen] = 0; + hi_sdssetlen(s,newlen); + return 0; +} + +/* Apply tolower() to every character of the hisds string 's'. */ +void hi_sdstolower(hisds s) { + int len = hi_sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = tolower(s[j]); +} + +/* Apply toupper() to every character of the hisds string 's'. 
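+ *
+ * (Editor's note) like hi_sdstolower() above, this works byte by byte and
+ * follows the current C locale's notion of case.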
*/ +void hi_sdstoupper(hisds s) { + int len = hi_sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = toupper(s[j]); +} + +/* Compare two hisds strings s1 and s2 with memcmp(). + * + * Return value: + * + * positive if s1 > s2. + * negative if s1 < s2. + * 0 if s1 and s2 are exactly the same binary string. + * + * If two strings share exactly the same prefix, but one of the two has + * additional characters, the longer string is considered to be greater than + * the smaller one. */ +int hi_sdscmp(const hisds s1, const hisds s2) { + size_t l1, l2, minlen; + int cmp; + + l1 = hi_sdslen(s1); + l2 = hi_sdslen(s2); + minlen = (l1 < l2) ? l1 : l2; + cmp = memcmp(s1,s2,minlen); + if (cmp == 0) return l1-l2; + return cmp; +} + +/* Split 's' with separator in 'sep'. An array + * of hisds strings is returned. *count will be set + * by reference to the number of tokens returned. + * + * On out of memory, zero length string, zero length + * separator, NULL is returned. + * + * Note that 'sep' is able to split a string using + * a multi-character separator. For example + * hi_sdssplit("foo_-_bar","_-_"); will return two + * elements "foo" and "bar". + * + * This version of the function is binary-safe but + * requires length arguments. hi_sdssplit() is just the + * same function but for zero-terminated strings. + */ +hisds *hi_sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count) { + int elements = 0, slots = 5, start = 0, j; + hisds *tokens; + + if (seplen < 1 || len < 0) return NULL; + + tokens = hi_s_malloc(sizeof(hisds)*slots); + if (tokens == NULL) return NULL; + + if (len == 0) { + *count = 0; + return tokens; + } + for (j = 0; j < (len-(seplen-1)); j++) { + /* make sure there is room for the next element and the final one */ + if (slots < elements+2) { + hisds *newtokens; + + slots *= 2; + newtokens = hi_s_realloc(tokens,sizeof(hisds)*slots); + if (newtokens == NULL) goto cleanup; + tokens = newtokens; + } + /* search the separator */ + if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) { + tokens[elements] = hi_sdsnewlen(s+start,j-start); + if (tokens[elements] == NULL) goto cleanup; + elements++; + start = j+seplen; + j = j+seplen-1; /* skip the separator */ + } + } + /* Add the final element. We are sure there is room in the tokens array. */ + tokens[elements] = hi_sdsnewlen(s+start,len-start); + if (tokens[elements] == NULL) goto cleanup; + elements++; + *count = elements; + return tokens; + +cleanup: + { + int i; + for (i = 0; i < elements; i++) hi_sdsfree(tokens[i]); + hi_s_free(tokens); + *count = 0; + return NULL; + } +} + +/* Free the result returned by hi_sdssplitlen(), or do nothing if 'tokens' is NULL. */ +void hi_sdsfreesplitres(hisds *tokens, int count) { + if (!tokens) return; + while(count--) + hi_sdsfree(tokens[count]); + hi_s_free(tokens); +} + +/* Append to the hisds string "s" an escaped string representation where + * all the non-printable characters (tested with isprint()) are turned into + * escapes in the form "\n\r\a...." or "\x". + * + * After the call, the modified hisds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. 
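+ *
+ * Example (editor's illustration): appending the three bytes 'a', '\n',
+ * 'b' produces the quoted form "a\nb", six printable characters counting
+ * the surrounding double quotes.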
*/ +hisds hi_sdscatrepr(hisds s, const char *p, size_t len) { + s = hi_sdscatlen(s,"\"",1); + while(len--) { + switch(*p) { + case '\\': + case '"': + s = hi_sdscatprintf(s,"\\%c",*p); + break; + case '\n': s = hi_sdscatlen(s,"\\n",2); break; + case '\r': s = hi_sdscatlen(s,"\\r",2); break; + case '\t': s = hi_sdscatlen(s,"\\t",2); break; + case '\a': s = hi_sdscatlen(s,"\\a",2); break; + case '\b': s = hi_sdscatlen(s,"\\b",2); break; + default: + if (isprint(*p)) + s = hi_sdscatprintf(s,"%c",*p); + else + s = hi_sdscatprintf(s,"\\x%02x",(unsigned char)*p); + break; + } + p++; + } + return hi_sdscatlen(s,"\"",1); +} + +/* Helper function for hi_sdssplitargs() that converts a hex digit into an + * integer from 0 to 15 */ +static int hi_hex_digit_to_int(char c) { + switch(c) { + case '0': return 0; + case '1': return 1; + case '2': return 2; + case '3': return 3; + case '4': return 4; + case '5': return 5; + case '6': return 6; + case '7': return 7; + case '8': return 8; + case '9': return 9; + case 'a': case 'A': return 10; + case 'b': case 'B': return 11; + case 'c': case 'C': return 12; + case 'd': case 'D': return 13; + case 'e': case 'E': return 14; + case 'f': case 'F': return 15; + default: return 0; + } +} + +/* Split a line into arguments, where every argument can be in the + * following programming-language REPL-alike form: + * + * foo bar "newline are supported\n" and "\xff\x00otherstuff" + * + * The number of arguments is stored into *argc, and an array + * of hisds is returned. + * + * The caller should free the resulting array of hisds strings with + * hi_sdsfreesplitres(). + * + * Note that hi_sdscatrepr() is able to convert back a string into + * a quoted string in the same format hi_sdssplitargs() is able to parse. + * + * The function returns the allocated tokens on success, even when the + * input string is empty, or NULL if the input contains unbalanced + * quotes or closed quotes followed by non space characters + * as in: "foo"bar or "foo' + */ +hisds *hi_sdssplitargs(const char *line, int *argc) { + const char *p = line; + char *current = NULL; + char **vector = NULL; + + *argc = 0; + while(1) { + /* skip blanks */ + while(*p && isspace(*p)) p++; + if (*p) { + /* get a token */ + int inq=0; /* set to 1 if we are in "quotes" */ + int insq=0; /* set to 1 if we are in 'single quotes' */ + int done=0; + + if (current == NULL) current = hi_sdsempty(); + while(!done) { + if (inq) { + if (*p == '\\' && *(p+1) == 'x' && + isxdigit(*(p+2)) && + isxdigit(*(p+3))) + { + unsigned char byte; + + byte = (hi_hex_digit_to_int(*(p+2))*16)+ + hi_hex_digit_to_int(*(p+3)); + current = hi_sdscatlen(current,(char*)&byte,1); + p += 3; + } else if (*p == '\\' && *(p+1)) { + char c; + + p++; + switch(*p) { + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'b': c = '\b'; break; + case 'a': c = '\a'; break; + default: c = *p; break; + } + current = hi_sdscatlen(current,&c,1); + } else if (*p == '"') { + /* closing quote must be followed by a space or + * nothing at all. */ + if (*(p+1) && !isspace(*(p+1))) goto err; + done=1; + } else if (!*p) { + /* unterminated quotes */ + goto err; + } else { + current = hi_sdscatlen(current,p,1); + } + } else if (insq) { + if (*p == '\\' && *(p+1) == '\'') { + p++; + current = hi_sdscatlen(current,"'",1); + } else if (*p == '\'') { + /* closing quote must be followed by a space or + * nothing at all. 
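+ *
+ * (Editor's illustration) e.g. the input 'foo'bar is rejected, while
+ * 'foo' bar parses into the two tokens "foo" and "bar".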
*/ + if (*(p+1) && !isspace(*(p+1))) goto err; + done=1; + } else if (!*p) { + /* unterminated quotes */ + goto err; + } else { + current = hi_sdscatlen(current,p,1); + } + } else { + switch(*p) { + case ' ': + case '\n': + case '\r': + case '\t': + case '\0': + done=1; + break; + case '"': + inq=1; + break; + case '\'': + insq=1; + break; + default: + current = hi_sdscatlen(current,p,1); + break; + } + } + if (*p) p++; + } + /* add the token to the vector */ + { + char **new_vector = hi_s_realloc(vector,((*argc)+1)*sizeof(char*)); + if (new_vector == NULL) { + hi_s_free(vector); + return NULL; + } + + vector = new_vector; + vector[*argc] = current; + (*argc)++; + current = NULL; + } + } else { + /* Even on empty input string return something not NULL. */ + if (vector == NULL) vector = hi_s_malloc(sizeof(void*)); + return vector; + } + } + +err: + while((*argc)--) + hi_sdsfree(vector[*argc]); + hi_s_free(vector); + if (current) hi_sdsfree(current); + *argc = 0; + return NULL; +} + +/* Modify the string substituting all the occurrences of the set of + * characters specified in the 'from' string to the corresponding character + * in the 'to' array. + * + * For instance: hi_sdsmapchars(mystring, "ho", "01", 2) + * will have the effect of turning the string "hello" into "0ell1". + * + * The function returns the hisds string pointer, that is always the same + * as the input pointer since no resize is needed. */ +hisds hi_sdsmapchars(hisds s, const char *from, const char *to, size_t setlen) { + size_t j, i, l = hi_sdslen(s); + + for (j = 0; j < l; j++) { + for (i = 0; i < setlen; i++) { + if (s[j] == from[i]) { + s[j] = to[i]; + break; + } + } + } + return s; +} + +/* Join an array of C strings using the specified separator (also a C string). + * Returns the result as an hisds string. */ +hisds hi_sdsjoin(char **argv, int argc, char *sep) { + hisds join = hi_sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = hi_sdscat(join, argv[j]); + if (j != argc-1) join = hi_sdscat(join,sep); + } + return join; +} + +/* Like hi_sdsjoin, but joins an array of SDS strings. */ +hisds hi_sdsjoinsds(hisds *argv, int argc, const char *sep, size_t seplen) { + hisds join = hi_sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = hi_sdscatsds(join, argv[j]); + if (j != argc-1) join = hi_sdscatlen(join,sep,seplen); + } + return join; +} + +/* Wrappers to the allocators used by SDS. Note that SDS will actually + * just use the macros defined into sdsalloc.h in order to avoid to pay + * the overhead of function calls. Here we define these wrappers only for + * the programs SDS is linked to, if they want to touch the SDS internals + * even if they use a different allocator. 
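+ *
+ * (Editor's illustration) e.g. a host program that obtained the raw
+ * allocation pointer via hi_sdsAllocPtr() can release it with
+ * hi_sds_free(), regardless of which allocator it links itself.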
*/ +void *hi_sds_malloc(size_t size) { return hi_s_malloc(size); } +void *hi_sds_realloc(void *ptr, size_t size) { return hi_s_realloc(ptr,size); } +void hi_sds_free(void *ptr) { hi_s_free(ptr); } + +#if defined(HI_SDS_TEST_MAIN) +#include +#include "testhelp.h" +#include "limits.h" + +#define UNUSED(x) (void)(x) +int hi_sdsTest(void) { + { + hisds x = hi_sdsnew("foo"), y; + + test_cond("Create a string and obtain the length", + hi_sdslen(x) == 3 && memcmp(x,"foo\0",4) == 0) + + hi_sdsfree(x); + x = hi_sdsnewlen("foo",2); + test_cond("Create a string with specified length", + hi_sdslen(x) == 2 && memcmp(x,"fo\0",3) == 0) + + x = hi_sdscat(x,"bar"); + test_cond("Strings concatenation", + hi_sdslen(x) == 5 && memcmp(x,"fobar\0",6) == 0); + + x = hi_sdscpy(x,"a"); + test_cond("hi_sdscpy() against an originally longer string", + hi_sdslen(x) == 1 && memcmp(x,"a\0",2) == 0) + + x = hi_sdscpy(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk"); + test_cond("hi_sdscpy() against an originally shorter string", + hi_sdslen(x) == 33 && + memcmp(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0",33) == 0) + + hi_sdsfree(x); + x = hi_sdscatprintf(hi_sdsempty(),"%d",123); + test_cond("hi_sdscatprintf() seems working in the base case", + hi_sdslen(x) == 3 && memcmp(x,"123\0",4) == 0) + + hi_sdsfree(x); + x = hi_sdsnew("--"); + x = hi_sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX); + test_cond("hi_sdscatfmt() seems working in the base case", + hi_sdslen(x) == 60 && + memcmp(x,"--Hello Hi! World -9223372036854775808," + "9223372036854775807--",60) == 0) + printf("[%s]\n",x); + + hi_sdsfree(x); + x = hi_sdsnew("--"); + x = hi_sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX); + test_cond("hi_sdscatfmt() seems working with unsigned numbers", + hi_sdslen(x) == 35 && + memcmp(x,"--4294967295,18446744073709551615--",35) == 0) + + hi_sdsfree(x); + x = hi_sdsnew(" x "); + hi_sdstrim(x," x"); + test_cond("hi_sdstrim() works when all chars match", + hi_sdslen(x) == 0) + + hi_sdsfree(x); + x = hi_sdsnew(" x "); + hi_sdstrim(x," "); + test_cond("hi_sdstrim() works when a single char remains", + hi_sdslen(x) == 1 && x[0] == 'x') + + hi_sdsfree(x); + x = hi_sdsnew("xxciaoyyy"); + hi_sdstrim(x,"xy"); + test_cond("hi_sdstrim() correctly trims characters", + hi_sdslen(x) == 4 && memcmp(x,"ciao\0",5) == 0) + + y = hi_sdsdup(x); + hi_sdsrange(y,1,1); + test_cond("hi_sdsrange(...,1,1)", + hi_sdslen(y) == 1 && memcmp(y,"i\0",2) == 0) + + hi_sdsfree(y); + y = hi_sdsdup(x); + hi_sdsrange(y,1,-1); + test_cond("hi_sdsrange(...,1,-1)", + hi_sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) + + hi_sdsfree(y); + y = hi_sdsdup(x); + hi_sdsrange(y,-2,-1); + test_cond("hi_sdsrange(...,-2,-1)", + hi_sdslen(y) == 2 && memcmp(y,"ao\0",3) == 0) + + hi_sdsfree(y); + y = hi_sdsdup(x); + hi_sdsrange(y,2,1); + test_cond("hi_sdsrange(...,2,1)", + hi_sdslen(y) == 0 && memcmp(y,"\0",1) == 0) + + hi_sdsfree(y); + y = hi_sdsdup(x); + hi_sdsrange(y,1,100); + test_cond("hi_sdsrange(...,1,100)", + hi_sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) + + hi_sdsfree(y); + y = hi_sdsdup(x); + hi_sdsrange(y,100,100); + test_cond("hi_sdsrange(...,100,100)", + hi_sdslen(y) == 0 && memcmp(y,"\0",1) == 0) + + hi_sdsfree(y); + hi_sdsfree(x); + x = hi_sdsnew("foo"); + y = hi_sdsnew("foa"); + test_cond("hi_sdscmp(foo,foa)", hi_sdscmp(x,y) > 0) + + hi_sdsfree(y); + hi_sdsfree(x); + x = hi_sdsnew("bar"); + y = hi_sdsnew("bar"); + test_cond("hi_sdscmp(bar,bar)", hi_sdscmp(x,y) == 0) + + hi_sdsfree(y); + hi_sdsfree(x); + x = hi_sdsnew("aar"); + y = hi_sdsnew("bar"); + 
test_cond("hi_sdscmp(bar,bar)", hi_sdscmp(x,y) < 0) + + hi_sdsfree(y); + hi_sdsfree(x); + x = hi_sdsnewlen("\a\n\0foo\r",7); + y = hi_sdscatrepr(hi_sdsempty(),x,hi_sdslen(x)); + test_cond("hi_sdscatrepr(...data...)", + memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) + + { + unsigned int oldfree; + char *p; + int step = 10, j, i; + + hi_sdsfree(x); + hi_sdsfree(y); + x = hi_sdsnew("0"); + test_cond("hi_sdsnew() free/len buffers", hi_sdslen(x) == 1 && hi_sdsavail(x) == 0); + + /* Run the test a few times in order to hit the first two + * SDS header types. */ + for (i = 0; i < 10; i++) { + int oldlen = hi_sdslen(x); + x = hi_sdsMakeRoomFor(x,step); + int type = x[-1]&HI_SDS_TYPE_MASK; + + test_cond("sdsMakeRoomFor() len", hi_sdslen(x) == oldlen); + if (type != HI_SDS_TYPE_5) { + test_cond("hi_sdsMakeRoomFor() free", hi_sdsavail(x) >= step); + oldfree = hi_sdsavail(x); + } + p = x+oldlen; + for (j = 0; j < step; j++) { + p[j] = 'A'+j; + } + hi_sdsIncrLen(x,step); + } + test_cond("hi_sdsMakeRoomFor() content", + memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); + test_cond("sdsMakeRoomFor() final length",hi_sdslen(x)==101); + + hi_sdsfree(x); + } + } + test_report(); + return 0; +} +#endif + +#ifdef HI_SDS_TEST_MAIN +int main(void) { + return hi_sdsTest(); +} +#endif diff --git a/deps/hiredis/sds.h b/deps/hiredis/sds.h new file mode 100644 index 0000000..573d6dd --- /dev/null +++ b/deps/hiredis/sds.h @@ -0,0 +1,278 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HIREDIS_SDS_H +#define HIREDIS_SDS_H + +#define HI_SDS_MAX_PREALLOC (1024*1024) +#ifdef _MSC_VER +#define __attribute__(x) +typedef long long ssize_t; +#define SSIZE_MAX (LLONG_MAX >> 1) +#endif + +#include +#include +#include + +typedef char *hisds; + +/* Note: sdshdr5 is never used, we just access the flags byte directly. 
+ * However is here to document the layout of type 5 SDS strings. */ +struct __attribute__ ((__packed__)) hisdshdr5 { + unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ + char buf[]; +}; +struct __attribute__ ((__packed__)) hisdshdr8 { + uint8_t len; /* used */ + uint8_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) hisdshdr16 { + uint16_t len; /* used */ + uint16_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) hisdshdr32 { + uint32_t len; /* used */ + uint32_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) hisdshdr64 { + uint64_t len; /* used */ + uint64_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; + +#define HI_SDS_TYPE_5 0 +#define HI_SDS_TYPE_8 1 +#define HI_SDS_TYPE_16 2 +#define HI_SDS_TYPE_32 3 +#define HI_SDS_TYPE_64 4 +#define HI_SDS_TYPE_MASK 7 +#define HI_SDS_TYPE_BITS 3 +#define HI_SDS_HDR_VAR(T,s) struct hisdshdr##T *sh = (struct hisdshdr##T *)((s)-(sizeof(struct hisdshdr##T))); +#define HI_SDS_HDR(T,s) ((struct hisdshdr##T *)((s)-(sizeof(struct hisdshdr##T)))) +#define HI_SDS_TYPE_5_LEN(f) ((f)>>HI_SDS_TYPE_BITS) + +static inline size_t hi_sdslen(const hisds s) { + unsigned char flags = s[-1]; + switch(flags & HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + return HI_SDS_TYPE_5_LEN(flags); + case HI_SDS_TYPE_8: + return HI_SDS_HDR(8,s)->len; + case HI_SDS_TYPE_16: + return HI_SDS_HDR(16,s)->len; + case HI_SDS_TYPE_32: + return HI_SDS_HDR(32,s)->len; + case HI_SDS_TYPE_64: + return HI_SDS_HDR(64,s)->len; + } + return 0; +} + +static inline size_t hi_sdsavail(const hisds s) { + unsigned char flags = s[-1]; + switch(flags&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: { + return 0; + } + case HI_SDS_TYPE_8: { + HI_SDS_HDR_VAR(8,s); + return sh->alloc - sh->len; + } + case HI_SDS_TYPE_16: { + HI_SDS_HDR_VAR(16,s); + return sh->alloc - sh->len; + } + case HI_SDS_TYPE_32: { + HI_SDS_HDR_VAR(32,s); + return sh->alloc - sh->len; + } + case HI_SDS_TYPE_64: { + HI_SDS_HDR_VAR(64,s); + return sh->alloc - sh->len; + } + } + return 0; +} + +static inline void hi_sdssetlen(hisds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + *fp = (unsigned char)(HI_SDS_TYPE_5 | (newlen << HI_SDS_TYPE_BITS)); + } + break; + case HI_SDS_TYPE_8: + HI_SDS_HDR(8,s)->len = (uint8_t)newlen; + break; + case HI_SDS_TYPE_16: + HI_SDS_HDR(16,s)->len = (uint16_t)newlen; + break; + case HI_SDS_TYPE_32: + HI_SDS_HDR(32,s)->len = (uint32_t)newlen; + break; + case HI_SDS_TYPE_64: + HI_SDS_HDR(64,s)->len = (uint64_t)newlen; + break; + } +} + +static inline void hi_sdsinclen(hisds s, size_t inc) { + unsigned char flags = s[-1]; + switch(flags&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char newlen = HI_SDS_TYPE_5_LEN(flags)+(unsigned char)inc; + *fp = HI_SDS_TYPE_5 | (newlen << HI_SDS_TYPE_BITS); + } + break; + case HI_SDS_TYPE_8: + HI_SDS_HDR(8,s)->len += (uint8_t)inc; + break; + case HI_SDS_TYPE_16: + HI_SDS_HDR(16,s)->len += (uint16_t)inc; + break; + case HI_SDS_TYPE_32: + HI_SDS_HDR(32,s)->len += 
(uint32_t)inc; + break; + case HI_SDS_TYPE_64: + HI_SDS_HDR(64,s)->len += (uint64_t)inc; + break; + } +} + +/* hi_sdsalloc() = hi_sdsavail() + hi_sdslen() */ +static inline size_t hi_sdsalloc(const hisds s) { + unsigned char flags = s[-1]; + switch(flags & HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + return HI_SDS_TYPE_5_LEN(flags); + case HI_SDS_TYPE_8: + return HI_SDS_HDR(8,s)->alloc; + case HI_SDS_TYPE_16: + return HI_SDS_HDR(16,s)->alloc; + case HI_SDS_TYPE_32: + return HI_SDS_HDR(32,s)->alloc; + case HI_SDS_TYPE_64: + return HI_SDS_HDR(64,s)->alloc; + } + return 0; +} + +static inline void hi_sdssetalloc(hisds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&HI_SDS_TYPE_MASK) { + case HI_SDS_TYPE_5: + /* Nothing to do, this type has no total allocation info. */ + break; + case HI_SDS_TYPE_8: + HI_SDS_HDR(8,s)->alloc = (uint8_t)newlen; + break; + case HI_SDS_TYPE_16: + HI_SDS_HDR(16,s)->alloc = (uint16_t)newlen; + break; + case HI_SDS_TYPE_32: + HI_SDS_HDR(32,s)->alloc = (uint32_t)newlen; + break; + case HI_SDS_TYPE_64: + HI_SDS_HDR(64,s)->alloc = (uint64_t)newlen; + break; + } +} + +hisds hi_sdsnewlen(const void *init, size_t initlen); +hisds hi_sdsnew(const char *init); +hisds hi_sdsempty(void); +hisds hi_sdsdup(const hisds s); +void hi_sdsfree(hisds s); +hisds hi_sdsgrowzero(hisds s, size_t len); +hisds hi_sdscatlen(hisds s, const void *t, size_t len); +hisds hi_sdscat(hisds s, const char *t); +hisds hi_sdscatsds(hisds s, const hisds t); +hisds hi_sdscpylen(hisds s, const char *t, size_t len); +hisds hi_sdscpy(hisds s, const char *t); + +hisds hi_sdscatvprintf(hisds s, const char *fmt, va_list ap); +#ifdef __GNUC__ +hisds hi_sdscatprintf(hisds s, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else +hisds hi_sdscatprintf(hisds s, const char *fmt, ...); +#endif + +hisds hi_sdscatfmt(hisds s, char const *fmt, ...); +hisds hi_sdstrim(hisds s, const char *cset); +int hi_sdsrange(hisds s, ssize_t start, ssize_t end); +void hi_sdsupdatelen(hisds s); +void hi_sdsclear(hisds s); +int hi_sdscmp(const hisds s1, const hisds s2); +hisds *hi_sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); +void hi_sdsfreesplitres(hisds *tokens, int count); +void hi_sdstolower(hisds s); +void hi_sdstoupper(hisds s); +hisds hi_sdsfromlonglong(long long value); +hisds hi_sdscatrepr(hisds s, const char *p, size_t len); +hisds *hi_sdssplitargs(const char *line, int *argc); +hisds hi_sdsmapchars(hisds s, const char *from, const char *to, size_t setlen); +hisds hi_sdsjoin(char **argv, int argc, char *sep); +hisds hi_sdsjoinsds(hisds *argv, int argc, const char *sep, size_t seplen); + +/* Low level functions exposed to the user API */ +hisds hi_sdsMakeRoomFor(hisds s, size_t addlen); +void hi_sdsIncrLen(hisds s, int incr); +hisds hi_sdsRemoveFreeSpace(hisds s); +size_t hi_sdsAllocSize(hisds s); +void *hi_sdsAllocPtr(hisds s); + +/* Export the allocator used by SDS to the program using SDS. + * Sometimes the program SDS is linked to, may use a different set of + * allocators, but may want to allocate or free things that SDS will + * respectively free or allocate. 
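+ * For example, a program that needs to free memory that SDS allocated should
+ * call hi_sds_free(), and memory that SDS will later free should be obtained
+ * from hi_sds_malloc().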
*/ +void *hi_sds_malloc(size_t size); +void *hi_sds_realloc(void *ptr, size_t size); +void hi_sds_free(void *ptr); + +#ifdef REDIS_TEST +int hi_sdsTest(int argc, char *argv[]); +#endif + +#endif /* HIREDIS_SDS_H */ diff --git a/deps/hiredis/sdsalloc.h b/deps/hiredis/sdsalloc.h new file mode 100644 index 0000000..c9dcc3d --- /dev/null +++ b/deps/hiredis/sdsalloc.h @@ -0,0 +1,44 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* SDS allocator selection. + * + * This file is used in order to change the SDS allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). */ + +#include "alloc.h" + +#define hi_s_malloc hi_malloc +#define hi_s_realloc hi_realloc +#define hi_s_free hi_free diff --git a/deps/hiredis/sdscompat.h b/deps/hiredis/sdscompat.h new file mode 100644 index 0000000..e5a2574 --- /dev/null +++ b/deps/hiredis/sdscompat.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * SDS compatibility header. + * + * This simple file maps sds types and calls to their unique hiredis symbol names. + * It's useful when we build Hiredis as a dependency of Redis and want to call + * Hiredis' sds symbols rather than the ones built into Redis, as the libraries + * have slightly diverged and could cause hard to track down ABI incompatibility + * bugs. + * + */ + +#ifndef HIREDIS_SDS_COMPAT +#define HIREDIS_SDS_COMPAT + +#define sds hisds + +#define sdslen hi_sdslen +#define sdsavail hi_sdsavail +#define sdssetlen hi_sdssetlen +#define sdsinclen hi_sdsinclen +#define sdsalloc hi_sdsalloc +#define sdssetalloc hi_sdssetalloc + +#define sdsAllocPtr hi_sdsAllocPtr +#define sdsAllocSize hi_sdsAllocSize +#define sdscat hi_sdscat +#define sdscatfmt hi_sdscatfmt +#define sdscatlen hi_sdscatlen +#define sdscatprintf hi_sdscatprintf +#define sdscatrepr hi_sdscatrepr +#define sdscatsds hi_sdscatsds +#define sdscatvprintf hi_sdscatvprintf +#define sdsclear hi_sdsclear +#define sdscmp hi_sdscmp +#define sdscpy hi_sdscpy +#define sdscpylen hi_sdscpylen +#define sdsdup hi_sdsdup +#define sdsempty hi_sdsempty +#define sds_free hi_sds_free +#define sdsfree hi_sdsfree +#define sdsfreesplitres hi_sdsfreesplitres +#define sdsfromlonglong hi_sdsfromlonglong +#define sdsgrowzero hi_sdsgrowzero +#define sdsIncrLen hi_sdsIncrLen +#define sdsjoin hi_sdsjoin +#define sdsjoinsds hi_sdsjoinsds +#define sdsll2str hi_sdsll2str +#define sdsMakeRoomFor hi_sdsMakeRoomFor +#define sds_malloc hi_sds_malloc +#define sdsmapchars hi_sdsmapchars +#define sdsnew hi_sdsnew +#define sdsnewlen hi_sdsnewlen +#define sdsrange hi_sdsrange +#define sds_realloc hi_sds_realloc +#define sdsRemoveFreeSpace hi_sdsRemoveFreeSpace +#define sdssplitargs hi_sdssplitargs +#define sdssplitlen hi_sdssplitlen +#define sdstolower hi_sdstolower +#define sdstoupper hi_sdstoupper +#define sdstrim hi_sdstrim +#define sdsull2str hi_sdsull2str +#define sdsupdatelen hi_sdsupdatelen + +#endif /* HIREDIS_SDS_COMPAT */ diff --git a/deps/hiredis/sockcompat.c b/deps/hiredis/sockcompat.c new file mode 100644 index 0000000..f99d14b --- /dev/null +++ b/deps/hiredis/sockcompat.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2019, Marcus Geelnard + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#define REDIS_SOCKCOMPAT_IMPLEMENTATION +#include "sockcompat.h" + +#ifdef _WIN32 +static int _wsaErrorToErrno(int err) { + switch (err) { + case WSAEWOULDBLOCK: + return EWOULDBLOCK; + case WSAEINPROGRESS: + return EINPROGRESS; + case WSAEALREADY: + return EALREADY; + case WSAENOTSOCK: + return ENOTSOCK; + case WSAEDESTADDRREQ: + return EDESTADDRREQ; + case WSAEMSGSIZE: + return EMSGSIZE; + case WSAEPROTOTYPE: + return EPROTOTYPE; + case WSAENOPROTOOPT: + return ENOPROTOOPT; + case WSAEPROTONOSUPPORT: + return EPROTONOSUPPORT; + case WSAEOPNOTSUPP: + return EOPNOTSUPP; + case WSAEAFNOSUPPORT: + return EAFNOSUPPORT; + case WSAEADDRINUSE: + return EADDRINUSE; + case WSAEADDRNOTAVAIL: + return EADDRNOTAVAIL; + case WSAENETDOWN: + return ENETDOWN; + case WSAENETUNREACH: + return ENETUNREACH; + case WSAENETRESET: + return ENETRESET; + case WSAECONNABORTED: + return ECONNABORTED; + case WSAECONNRESET: + return ECONNRESET; + case WSAENOBUFS: + return ENOBUFS; + case WSAEISCONN: + return EISCONN; + case WSAENOTCONN: + return ENOTCONN; + case WSAETIMEDOUT: + return ETIMEDOUT; + case WSAECONNREFUSED: + return ECONNREFUSED; + case WSAELOOP: + return ELOOP; + case WSAENAMETOOLONG: + return ENAMETOOLONG; + case WSAEHOSTUNREACH: + return EHOSTUNREACH; + case WSAENOTEMPTY: + return ENOTEMPTY; + default: + /* We just return a generic I/O error if we could not find a relevant error. */ + return EIO; + } +} + +static void _updateErrno(int success) { + errno = success ? 0 : _wsaErrorToErrno(WSAGetLastError()); +} + +static int _initWinsock() { + static int s_initialized = 0; + if (!s_initialized) { + static WSADATA wsadata; + int err = WSAStartup(MAKEWORD(2,2), &wsadata); + if (err != 0) { + errno = _wsaErrorToErrno(err); + return 0; + } + s_initialized = 1; + } + return 1; +} + +int win32_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { + /* Note: This function is likely to be called before other functions, so run init here. 
*/ + if (!_initWinsock()) { + return EAI_FAIL; + } + + switch (getaddrinfo(node, service, hints, res)) { + case 0: return 0; + case WSATRY_AGAIN: return EAI_AGAIN; + case WSAEINVAL: return EAI_BADFLAGS; + case WSAEAFNOSUPPORT: return EAI_FAMILY; + case WSA_NOT_ENOUGH_MEMORY: return EAI_MEMORY; + case WSAHOST_NOT_FOUND: return EAI_NONAME; + case WSATYPE_NOT_FOUND: return EAI_SERVICE; + case WSAESOCKTNOSUPPORT: return EAI_SOCKTYPE; + default: return EAI_FAIL; /* Including WSANO_RECOVERY */ + } +} + +const char *win32_gai_strerror(int errcode) { + switch (errcode) { + case 0: errcode = 0; break; + case EAI_AGAIN: errcode = WSATRY_AGAIN; break; + case EAI_BADFLAGS: errcode = WSAEINVAL; break; + case EAI_FAMILY: errcode = WSAEAFNOSUPPORT; break; + case EAI_MEMORY: errcode = WSA_NOT_ENOUGH_MEMORY; break; + case EAI_NONAME: errcode = WSAHOST_NOT_FOUND; break; + case EAI_SERVICE: errcode = WSATYPE_NOT_FOUND; break; + case EAI_SOCKTYPE: errcode = WSAESOCKTNOSUPPORT; break; + default: errcode = WSANO_RECOVERY; break; /* Including EAI_FAIL */ + } + return gai_strerror(errcode); +} + +void win32_freeaddrinfo(struct addrinfo *res) { + freeaddrinfo(res); +} + +SOCKET win32_socket(int domain, int type, int protocol) { + SOCKET s; + + /* Note: This function is likely to be called before other functions, so run init here. */ + if (!_initWinsock()) { + return INVALID_SOCKET; + } + + _updateErrno((s = socket(domain, type, protocol)) != INVALID_SOCKET); + return s; +} + +int win32_ioctl(SOCKET fd, unsigned long request, unsigned long *argp) { + int ret = ioctlsocket(fd, (long)request, argp); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_bind(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen) { + int ret = bind(sockfd, addr, addrlen); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen) { + int ret = connect(sockfd, addr, addrlen); + _updateErrno(ret != SOCKET_ERROR); + + /* For Winsock connect(), the WSAEWOULDBLOCK error means the same thing as + * EINPROGRESS for POSIX connect(), so we do that translation to keep POSIX + * logic consistent. */ + if (errno == EWOULDBLOCK) { + errno = EINPROGRESS; + } + + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen) { + int ret = 0; + if ((level == SOL_SOCKET) && ((optname == SO_RCVTIMEO) || (optname == SO_SNDTIMEO))) { + if (*optlen >= sizeof (struct timeval)) { + struct timeval *tv = optval; + DWORD timeout = 0; + socklen_t dwlen = 0; + ret = getsockopt(sockfd, level, optname, (char *)&timeout, &dwlen); + tv->tv_sec = timeout / 1000; + tv->tv_usec = (timeout * 1000) % 1000000; + } else { + ret = WSAEFAULT; + } + *optlen = sizeof (struct timeval); + } else { + ret = getsockopt(sockfd, level, optname, (char*)optval, optlen); + } + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? 
ret : -1; +} + +int win32_setsockopt(SOCKET sockfd, int level, int optname, const void *optval, socklen_t optlen) { + int ret = 0; + if ((level == SOL_SOCKET) && ((optname == SO_RCVTIMEO) || (optname == SO_SNDTIMEO))) { + const struct timeval *tv = optval; + DWORD timeout = tv->tv_sec * 1000 + tv->tv_usec / 1000; + ret = setsockopt(sockfd, level, optname, (const char*)&timeout, sizeof(DWORD)); + } else { + ret = setsockopt(sockfd, level, optname, (const char*)optval, optlen); + } + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_close(SOCKET fd) { + int ret = closesocket(fd); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +ssize_t win32_recv(SOCKET sockfd, void *buf, size_t len, int flags) { + int ret = recv(sockfd, (char*)buf, (int)len, flags); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags) { + int ret = send(sockfd, (const char*)buf, (int)len, flags); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout) { + int ret = WSAPoll(fds, nfds, timeout); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} +#endif /* _WIN32 */ diff --git a/deps/hiredis/sockcompat.h b/deps/hiredis/sockcompat.h new file mode 100644 index 0000000..85810e8 --- /dev/null +++ b/deps/hiredis/sockcompat.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019, Marcus Geelnard + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SOCKCOMPAT_H +#define __SOCKCOMPAT_H + +#ifndef _WIN32 +/* For POSIX systems we use the standard BSD socket API. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +/* For Windows we use winsock. */ +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 /* To get WSAPoll etc. 
*/ +#include +#include +#include +#include + +#ifdef _MSC_VER +typedef long long ssize_t; +#endif + +/* Emulate the parts of the BSD socket API that we need (override the winsock signatures). */ +int win32_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res); +const char *win32_gai_strerror(int errcode); +void win32_freeaddrinfo(struct addrinfo *res); +SOCKET win32_socket(int domain, int type, int protocol); +int win32_ioctl(SOCKET fd, unsigned long request, unsigned long *argp); +int win32_bind(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen); +int win32_setsockopt(SOCKET sockfd, int level, int optname, const void *optval, socklen_t optlen); +int win32_close(SOCKET fd); +ssize_t win32_recv(SOCKET sockfd, void *buf, size_t len, int flags); +ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags); +typedef ULONG nfds_t; +int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout); + +#ifndef REDIS_SOCKCOMPAT_IMPLEMENTATION +#define getaddrinfo(node, service, hints, res) win32_getaddrinfo(node, service, hints, res) +#undef gai_strerror +#define gai_strerror(errcode) win32_gai_strerror(errcode) +#define freeaddrinfo(res) win32_freeaddrinfo(res) +#define socket(domain, type, protocol) win32_socket(domain, type, protocol) +#define ioctl(fd, request, argp) win32_ioctl(fd, request, argp) +#define bind(sockfd, addr, addrlen) win32_bind(sockfd, addr, addrlen) +#define connect(sockfd, addr, addrlen) win32_connect(sockfd, addr, addrlen) +#define getsockopt(sockfd, level, optname, optval, optlen) win32_getsockopt(sockfd, level, optname, optval, optlen) +#define setsockopt(sockfd, level, optname, optval, optlen) win32_setsockopt(sockfd, level, optname, optval, optlen) +#define close(fd) win32_close(fd) +#define recv(sockfd, buf, len, flags) win32_recv(sockfd, buf, len, flags) +#define send(sockfd, buf, len, flags) win32_send(sockfd, buf, len, flags) +#define poll(fds, nfds, timeout) win32_poll(fds, nfds, timeout) +#endif /* REDIS_SOCKCOMPAT_IMPLEMENTATION */ +#endif /* _WIN32 */ + +#endif /* __SOCKCOMPAT_H */ diff --git a/deps/hiredis/ssl.c b/deps/hiredis/ssl.c new file mode 100644 index 0000000..c10af7c --- /dev/null +++ b/deps/hiredis/ssl.c @@ -0,0 +1,569 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * Copyright (c) 2019, Redis Labs + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "hiredis.h" +#include "async.h" + +#include +#include +#include +#ifdef _WIN32 +#include +#include +#else +#include +#endif + +#include +#include + +#include "win32.h" +#include "async_private.h" +#include "hiredis_ssl.h" + +void __redisSetError(redisContext *c, int type, const char *str); + +struct redisSSLContext { + /* Associated OpenSSL SSL_CTX as created by redisCreateSSLContext() */ + SSL_CTX *ssl_ctx; + + /* Requested SNI, or NULL */ + char *server_name; +}; + +/* The SSL connection context is attached to SSL/TLS connections as a privdata. */ +typedef struct redisSSL { + /** + * OpenSSL SSL object. + */ + SSL *ssl; + + /** + * SSL_write() requires to be called again with the same arguments it was + * previously called with in the event of an SSL_read/SSL_write situation + */ + size_t lastLen; + + /** Whether the SSL layer requires read (possibly before a write) */ + int wantRead; + + /** + * Whether a write was requested prior to a read. If set, the write() + * should resume whenever a read takes place, if possible + */ + int pendingWrite; +} redisSSL; + +/* Forward declaration */ +redisContextFuncs redisContextSSLFuncs; + +/** + * OpenSSL global initialization and locking handling callbacks. + * Note that this is only required for OpenSSL < 1.1.0. + */ + +#if OPENSSL_VERSION_NUMBER < 0x10100000L +#define HIREDIS_USE_CRYPTO_LOCKS +#endif + +#ifdef HIREDIS_USE_CRYPTO_LOCKS +#ifdef _WIN32 +typedef CRITICAL_SECTION sslLockType; +static void sslLockInit(sslLockType* l) { + InitializeCriticalSection(l); +} +static void sslLockAcquire(sslLockType* l) { + EnterCriticalSection(l); +} +static void sslLockRelease(sslLockType* l) { + LeaveCriticalSection(l); +} +#else +typedef pthread_mutex_t sslLockType; +static void sslLockInit(sslLockType *l) { + pthread_mutex_init(l, NULL); +} +static void sslLockAcquire(sslLockType *l) { + pthread_mutex_lock(l); +} +static void sslLockRelease(sslLockType *l) { + pthread_mutex_unlock(l); +} +#endif + +static sslLockType* ossl_locks; + +static void opensslDoLock(int mode, int lkid, const char *f, int line) { + sslLockType *l = ossl_locks + lkid; + + if (mode & CRYPTO_LOCK) { + sslLockAcquire(l); + } else { + sslLockRelease(l); + } + + (void)f; + (void)line; +} + +static int initOpensslLocks(void) { + unsigned ii, nlocks; + if (CRYPTO_get_locking_callback() != NULL) { + /* Someone already set the callback before us. Don't destroy it! 
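+         * For example, the host application or another library may already
+         * have installed its own OpenSSL locking callback.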
*/ + return REDIS_OK; + } + nlocks = CRYPTO_num_locks(); + ossl_locks = hi_malloc(sizeof(*ossl_locks) * nlocks); + if (ossl_locks == NULL) + return REDIS_ERR; + + for (ii = 0; ii < nlocks; ii++) { + sslLockInit(ossl_locks + ii); + } + CRYPTO_set_locking_callback(opensslDoLock); + return REDIS_OK; +} +#endif /* HIREDIS_USE_CRYPTO_LOCKS */ + +int redisInitOpenSSL(void) +{ + SSL_library_init(); +#ifdef HIREDIS_USE_CRYPTO_LOCKS + initOpensslLocks(); +#endif + + return REDIS_OK; +} + +/** + * redisSSLContext helper context destruction. + */ + +const char *redisSSLContextGetError(redisSSLContextError error) +{ + switch (error) { + case REDIS_SSL_CTX_NONE: + return "No Error"; + case REDIS_SSL_CTX_CREATE_FAILED: + return "Failed to create OpenSSL SSL_CTX"; + case REDIS_SSL_CTX_CERT_KEY_REQUIRED: + return "Client cert and key must both be specified or skipped"; + case REDIS_SSL_CTX_CA_CERT_LOAD_FAILED: + return "Failed to load CA Certificate or CA Path"; + case REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED: + return "Failed to load client certificate"; + case REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED: + return "Failed to load private key"; + case REDIS_SSL_CTX_OS_CERTSTORE_OPEN_FAILED: + return "Failed to open system certificate store"; + case REDIS_SSL_CTX_OS_CERT_ADD_FAILED: + return "Failed to add CA certificates obtained from system to the SSL context"; + default: + return "Unknown error code"; + } +} + +void redisFreeSSLContext(redisSSLContext *ctx) +{ + if (!ctx) + return; + + if (ctx->server_name) { + hi_free(ctx->server_name); + ctx->server_name = NULL; + } + + if (ctx->ssl_ctx) { + SSL_CTX_free(ctx->ssl_ctx); + ctx->ssl_ctx = NULL; + } + + hi_free(ctx); +} + + +/** + * redisSSLContext helper context initialization. + */ + +redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *capath, + const char *cert_filename, const char *private_key_filename, + const char *server_name, redisSSLContextError *error) +{ +#ifdef _WIN32 + HCERTSTORE win_store = NULL; + PCCERT_CONTEXT win_ctx = NULL; +#endif + + redisSSLContext *ctx = hi_calloc(1, sizeof(redisSSLContext)); + if (ctx == NULL) + goto error; + + ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method()); + if (!ctx->ssl_ctx) { + if (error) *error = REDIS_SSL_CTX_CREATE_FAILED; + goto error; + } + + SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); + SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER, NULL); + + if ((cert_filename != NULL && private_key_filename == NULL) || + (private_key_filename != NULL && cert_filename == NULL)) { + if (error) *error = REDIS_SSL_CTX_CERT_KEY_REQUIRED; + goto error; + } + + if (capath || cacert_filename) { +#ifdef _WIN32 + if (0 == strcmp(cacert_filename, "wincert")) { + win_store = CertOpenSystemStore(NULL, "Root"); + if (!win_store) { + if (error) *error = REDIS_SSL_CTX_OS_CERTSTORE_OPEN_FAILED; + goto error; + } + X509_STORE* store = SSL_CTX_get_cert_store(ctx->ssl_ctx); + while (win_ctx = CertEnumCertificatesInStore(win_store, win_ctx)) { + X509* x509 = NULL; + x509 = d2i_X509(NULL, (const unsigned char**)&win_ctx->pbCertEncoded, win_ctx->cbCertEncoded); + if (x509) { + if ((1 != X509_STORE_add_cert(store, x509)) || + (1 != SSL_CTX_add_client_CA(ctx->ssl_ctx, x509))) + { + if (error) *error = REDIS_SSL_CTX_OS_CERT_ADD_FAILED; + goto error; + } + X509_free(x509); + } + } + CertFreeCertificateContext(win_ctx); + CertCloseStore(win_store, 0); + } else +#endif + if (!SSL_CTX_load_verify_locations(ctx->ssl_ctx, cacert_filename, capath)) { + if (error) *error = 
REDIS_SSL_CTX_CA_CERT_LOAD_FAILED; + goto error; + } + } + + if (cert_filename) { + if (!SSL_CTX_use_certificate_chain_file(ctx->ssl_ctx, cert_filename)) { + if (error) *error = REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED; + goto error; + } + if (!SSL_CTX_use_PrivateKey_file(ctx->ssl_ctx, private_key_filename, SSL_FILETYPE_PEM)) { + if (error) *error = REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED; + goto error; + } + } + + if (server_name) + ctx->server_name = hi_strdup(server_name); + + return ctx; + +error: +#ifdef _WIN32 + CertFreeCertificateContext(win_ctx); + CertCloseStore(win_store, 0); +#endif + redisFreeSSLContext(ctx); + return NULL; +} + +/** + * SSL Connection initialization. + */ + + +static int redisSSLConnect(redisContext *c, SSL *ssl) { + if (c->privctx) { + __redisSetError(c, REDIS_ERR_OTHER, "redisContext was already associated"); + return REDIS_ERR; + } + + redisSSL *rssl = hi_calloc(1, sizeof(redisSSL)); + if (rssl == NULL) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; + } + + c->funcs = &redisContextSSLFuncs; + rssl->ssl = ssl; + + SSL_set_mode(rssl->ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); + SSL_set_fd(rssl->ssl, c->fd); + SSL_set_connect_state(rssl->ssl); + + ERR_clear_error(); + int rv = SSL_connect(rssl->ssl); + if (rv == 1) { + c->privctx = rssl; + return REDIS_OK; + } + + rv = SSL_get_error(rssl->ssl, rv); + if (((c->flags & REDIS_BLOCK) == 0) && + (rv == SSL_ERROR_WANT_READ || rv == SSL_ERROR_WANT_WRITE)) { + c->privctx = rssl; + return REDIS_OK; + } + + if (c->err == 0) { + char err[512]; + if (rv == SSL_ERROR_SYSCALL) + snprintf(err,sizeof(err)-1,"SSL_connect failed: %s",strerror(errno)); + else { + unsigned long e = ERR_peek_last_error(); + snprintf(err,sizeof(err)-1,"SSL_connect failed: %s", + ERR_reason_error_string(e)); + } + __redisSetError(c, REDIS_ERR_IO, err); + } + + hi_free(rssl); + return REDIS_ERR; +} + +/** + * A wrapper around redisSSLConnect() for users who manage their own context and + * create their own SSL object. + */ + +int redisInitiateSSL(redisContext *c, SSL *ssl) { + return redisSSLConnect(c, ssl); +} + +/** + * A wrapper around redisSSLConnect() for users who use redisSSLContext and don't + * manage their own SSL objects. + */ + +int redisInitiateSSLWithContext(redisContext *c, redisSSLContext *redis_ssl_ctx) +{ + if (!c || !redis_ssl_ctx) + return REDIS_ERR; + + /* We want to verify that redisSSLConnect() won't fail on this, as it will + * not own the SSL object in that case and we'll end up leaking. + */ + if (c->privctx) + return REDIS_ERR; + + SSL *ssl = SSL_new(redis_ssl_ctx->ssl_ctx); + if (!ssl) { + __redisSetError(c, REDIS_ERR_OTHER, "Couldn't create new SSL instance"); + goto error; + } + + if (redis_ssl_ctx->server_name) { + if (!SSL_set_tlsext_host_name(ssl, redis_ssl_ctx->server_name)) { + __redisSetError(c, REDIS_ERR_OTHER, "Failed to set server_name/SNI"); + goto error; + } + } + + if (redisSSLConnect(c, ssl) != REDIS_OK) { + goto error; + } + + return REDIS_OK; + +error: + if (ssl) + SSL_free(ssl); + return REDIS_ERR; +} + +static int maybeCheckWant(redisSSL *rssl, int rv) { + /** + * If the error is WANT_READ or WANT_WRITE, the appropriate flags are set + * and true is returned. False is returned otherwise + */ + if (rv == SSL_ERROR_WANT_READ) { + rssl->wantRead = 1; + return 1; + } else if (rv == SSL_ERROR_WANT_WRITE) { + rssl->pendingWrite = 1; + return 1; + } else { + return 0; + } +} + +/** + * Implementation of redisContextFuncs for SSL connections. 
+ */ + +static void redisSSLFree(void *privctx){ + redisSSL *rsc = privctx; + + if (!rsc) return; + if (rsc->ssl) { + SSL_free(rsc->ssl); + rsc->ssl = NULL; + } + hi_free(rsc); +} + +static ssize_t redisSSLRead(redisContext *c, char *buf, size_t bufcap) { + redisSSL *rssl = c->privctx; + + int nread = SSL_read(rssl->ssl, buf, bufcap); + if (nread > 0) { + return nread; + } else if (nread == 0) { + __redisSetError(c, REDIS_ERR_EOF, "Server closed the connection"); + return -1; + } else { + int err = SSL_get_error(rssl->ssl, nread); + if (c->flags & REDIS_BLOCK) { + /** + * In blocking mode, we should never end up in a situation where + * we get an error without it being an actual error, except + * in the case of EINTR, which can be spuriously received from + * debuggers or whatever. + */ + if (errno == EINTR) { + return 0; + } else { + const char *msg = NULL; + if (errno == EAGAIN) { + msg = "Resource temporarily unavailable"; + } + __redisSetError(c, REDIS_ERR_IO, msg); + return -1; + } + } + + /** + * We can very well get an EWOULDBLOCK/EAGAIN, however + */ + if (maybeCheckWant(rssl, err)) { + return 0; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } +} + +static ssize_t redisSSLWrite(redisContext *c) { + redisSSL *rssl = c->privctx; + + size_t len = rssl->lastLen ? rssl->lastLen : hi_sdslen(c->obuf); + int rv = SSL_write(rssl->ssl, c->obuf, len); + + if (rv > 0) { + rssl->lastLen = 0; + } else if (rv < 0) { + rssl->lastLen = len; + + int err = SSL_get_error(rssl->ssl, rv); + if ((c->flags & REDIS_BLOCK) == 0 && maybeCheckWant(rssl, err)) { + return 0; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } + return rv; +} + +static void redisSSLAsyncRead(redisAsyncContext *ac) { + int rv; + redisSSL *rssl = ac->c.privctx; + redisContext *c = &ac->c; + + rssl->wantRead = 0; + + if (rssl->pendingWrite) { + int done; + + /* This is probably just a write event */ + rssl->pendingWrite = 0; + rv = redisBufferWrite(c, &done); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + return; + } else if (!done) { + _EL_ADD_WRITE(ac); + } + } + + rv = redisBufferRead(c); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + _EL_ADD_READ(ac); + redisProcessCallbacks(ac); + } +} + +static void redisSSLAsyncWrite(redisAsyncContext *ac) { + int rv, done = 0; + redisSSL *rssl = ac->c.privctx; + redisContext *c = &ac->c; + + rssl->pendingWrite = 0; + rv = redisBufferWrite(c, &done); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + return; + } + + if (!done) { + if (rssl->wantRead) { + /* Need to read-before-write */ + rssl->pendingWrite = 1; + _EL_DEL_WRITE(ac); + } else { + /* No extra reads needed, just need to write more */ + _EL_ADD_WRITE(ac); + } + } else { + /* Already done! 
*/ + _EL_DEL_WRITE(ac); + } + + /* Always reschedule a read */ + _EL_ADD_READ(ac); +} + +redisContextFuncs redisContextSSLFuncs = { + .free_privctx = redisSSLFree, + .async_read = redisSSLAsyncRead, + .async_write = redisSSLAsyncWrite, + .read = redisSSLRead, + .write = redisSSLWrite +}; + diff --git a/deps/hiredis/test.c b/deps/hiredis/test.c new file mode 100644 index 0000000..306aa55 --- /dev/null +++ b/deps/hiredis/test.c @@ -0,0 +1,2041 @@ +#include "fmacros.h" +#include "sockcompat.h" +#include +#include +#include +#ifndef _WIN32 +#include +#include +#endif +#include +#include +#include +#include +#include + +#include "hiredis.h" +#include "async.h" +#ifdef HIREDIS_TEST_SSL +#include "hiredis_ssl.h" +#endif +#ifdef HIREDIS_TEST_ASYNC +#include "adapters/libevent.h" +#include +#endif +#include "net.h" +#include "win32.h" + +enum connection_type { + CONN_TCP, + CONN_UNIX, + CONN_FD, + CONN_SSL +}; + +struct config { + enum connection_type type; + + struct { + const char *host; + int port; + struct timeval timeout; + } tcp; + + struct { + const char *path; + } unix_sock; + + struct { + const char *host; + int port; + const char *ca_cert; + const char *cert; + const char *key; + } ssl; +}; + +struct privdata { + int dtor_counter; +}; + +struct pushCounters { + int nil; + int str; +}; + +static int insecure_calloc_calls; + +#ifdef HIREDIS_TEST_SSL +redisSSLContext *_ssl_ctx = NULL; +#endif + +/* The following lines make up our testing "framework" :) */ +static int tests = 0, fails = 0, skips = 0; +#define test(_s) { printf("#%02d ", ++tests); printf(_s); } +#define test_cond(_c) if(_c) printf("\033[0;32mPASSED\033[0;0m\n"); else {printf("\033[0;31mFAILED\033[0;0m\n"); fails++;} +#define test_skipped() { printf("\033[01;33mSKIPPED\033[0;0m\n"); skips++; } + +static long long usec(void) { +#ifndef _MSC_VER + struct timeval tv; + gettimeofday(&tv,NULL); + return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; +#else + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + return (((long long)ft.dwHighDateTime << 32) | ft.dwLowDateTime) / 10; +#endif +} + +/* The assert() calls below have side effects, so we need assert() + * even if we are compiling without asserts (-DNDEBUG). */ +#ifdef NDEBUG +#undef assert +#define assert(e) (void)(e) +#endif + +/* Helper to extract Redis version information. Aborts on any failure. */ +#define REDIS_VERSION_FIELD "redis_version:" +void get_redis_version(redisContext *c, int *majorptr, int *minorptr) { + redisReply *reply; + char *eptr, *s, *e; + int major, minor; + + reply = redisCommand(c, "INFO"); + if (reply == NULL || c->err || reply->type != REDIS_REPLY_STRING) + goto abort; + if ((s = strstr(reply->str, REDIS_VERSION_FIELD)) == NULL) + goto abort; + + s += strlen(REDIS_VERSION_FIELD); + + /* We need a field terminator and at least 'x.y.z' (5) bytes of data */ + if ((e = strstr(s, "\r\n")) == NULL || (e - s) < 5) + goto abort; + + /* Extract version info */ + major = strtol(s, &eptr, 10); + if (*eptr != '.') goto abort; + minor = strtol(eptr+1, NULL, 10); + + /* Push info the caller wants */ + if (majorptr) *majorptr = major; + if (minorptr) *minorptr = minor; + + freeReplyObject(reply); + return; + +abort: + freeReplyObject(reply); + fprintf(stderr, "Error: Cannot determine Redis version, aborting\n"); + exit(1); +} + +static redisContext *select_database(redisContext *c) { + redisReply *reply; + + /* Switch to DB 9 for testing, now that we know we can chat. 
*/ + reply = redisCommand(c,"SELECT 9"); + assert(reply != NULL); + freeReplyObject(reply); + + /* Make sure the DB is emtpy */ + reply = redisCommand(c,"DBSIZE"); + assert(reply != NULL); + if (reply->type == REDIS_REPLY_INTEGER && reply->integer == 0) { + /* Awesome, DB 9 is empty and we can continue. */ + freeReplyObject(reply); + } else { + printf("Database #9 is not empty, test can not continue\n"); + exit(1); + } + + return c; +} + +/* Switch protocol */ +static void send_hello(redisContext *c, int version) { + redisReply *reply; + int expected; + + reply = redisCommand(c, "HELLO %d", version); + expected = version == 3 ? REDIS_REPLY_MAP : REDIS_REPLY_ARRAY; + assert(reply != NULL && reply->type == expected); + freeReplyObject(reply); +} + +/* Togggle client tracking */ +static void send_client_tracking(redisContext *c, const char *str) { + redisReply *reply; + + reply = redisCommand(c, "CLIENT TRACKING %s", str); + assert(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); +} + +static int disconnect(redisContext *c, int keep_fd) { + redisReply *reply; + + /* Make sure we're on DB 9. */ + reply = redisCommand(c,"SELECT 9"); + assert(reply != NULL); + freeReplyObject(reply); + reply = redisCommand(c,"FLUSHDB"); + assert(reply != NULL); + freeReplyObject(reply); + + /* Free the context as well, but keep the fd if requested. */ + if (keep_fd) + return redisFreeKeepFd(c); + redisFree(c); + return -1; +} + +static void do_ssl_handshake(redisContext *c) { +#ifdef HIREDIS_TEST_SSL + redisInitiateSSLWithContext(c, _ssl_ctx); + if (c->err) { + printf("SSL error: %s\n", c->errstr); + redisFree(c); + exit(1); + } +#else + (void) c; +#endif +} + +static redisContext *do_connect(struct config config) { + redisContext *c = NULL; + + if (config.type == CONN_TCP) { + c = redisConnect(config.tcp.host, config.tcp.port); + } else if (config.type == CONN_SSL) { + c = redisConnect(config.ssl.host, config.ssl.port); + } else if (config.type == CONN_UNIX) { + c = redisConnectUnix(config.unix_sock.path); + } else if (config.type == CONN_FD) { + /* Create a dummy connection just to get an fd to inherit */ + redisContext *dummy_ctx = redisConnectUnix(config.unix_sock.path); + if (dummy_ctx) { + int fd = disconnect(dummy_ctx, 1); + printf("Connecting to inherited fd %d\n", fd); + c = redisConnectFd(fd); + } + } else { + assert(NULL); + } + + if (c == NULL) { + printf("Connection error: can't allocate redis context\n"); + exit(1); + } else if (c->err) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + exit(1); + } + + if (config.type == CONN_SSL) { + do_ssl_handshake(c); + } + + return select_database(c); +} + +static void do_reconnect(redisContext *c, struct config config) { + redisReconnect(c); + + if (config.type == CONN_SSL) { + do_ssl_handshake(c); + } +} + +static void test_format_commands(void) { + char *cmd; + int len; + + test("Format command without interpolation: "); + len = redisFormatCommand(&cmd,"SET foo bar"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%s string interpolation: "); + len = redisFormatCommand(&cmd,"SET %s %s","foo","bar"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%s and an empty string: "); + len = redisFormatCommand(&cmd,"SET %s %s","foo",""); + 
test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(0+2)); + hi_free(cmd); + + test("Format command with an empty string in between proper interpolations: "); + len = redisFormatCommand(&cmd,"SET %s %s","","foo"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$0\r\n\r\n$3\r\nfoo\r\n",len) == 0 && + len == 4+4+(3+2)+4+(0+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%b string interpolation: "); + len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"b\0r",(size_t)3); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nb\0r\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%b and an empty string: "); + len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"",(size_t)0); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(0+2)); + hi_free(cmd); + + test("Format command with literal %%: "); + len = redisFormatCommand(&cmd,"SET %% %%"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$1\r\n%\r\n$1\r\n%\r\n",len) == 0 && + len == 4+4+(3+2)+4+(1+2)+4+(1+2)); + hi_free(cmd); + + /* Vararg width depends on the type. These tests make sure that the + * width is correctly determined using the format and subsequent varargs + * can correctly be interpolated. */ +#define INTEGER_WIDTH_TEST(fmt, type) do { \ + type value = 123; \ + test("Format command with printf-delegation (" #type "): "); \ + len = redisFormatCommand(&cmd,"key:%08" fmt " str:%s", value, "hello"); \ + test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:00000123\r\n$9\r\nstr:hello\r\n",len) == 0 && \ + len == 4+5+(12+2)+4+(9+2)); \ + hi_free(cmd); \ +} while(0) + +#define FLOAT_WIDTH_TEST(type) do { \ + type value = 123.0; \ + test("Format command with printf-delegation (" #type "): "); \ + len = redisFormatCommand(&cmd,"key:%08.3f str:%s", value, "hello"); \ + test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:0123.000\r\n$9\r\nstr:hello\r\n",len) == 0 && \ + len == 4+5+(12+2)+4+(9+2)); \ + hi_free(cmd); \ +} while(0) + + INTEGER_WIDTH_TEST("d", int); + INTEGER_WIDTH_TEST("hhd", char); + INTEGER_WIDTH_TEST("hd", short); + INTEGER_WIDTH_TEST("ld", long); + INTEGER_WIDTH_TEST("lld", long long); + INTEGER_WIDTH_TEST("u", unsigned int); + INTEGER_WIDTH_TEST("hhu", unsigned char); + INTEGER_WIDTH_TEST("hu", unsigned short); + INTEGER_WIDTH_TEST("lu", unsigned long); + INTEGER_WIDTH_TEST("llu", unsigned long long); + FLOAT_WIDTH_TEST(float); + FLOAT_WIDTH_TEST(double); + + test("Format command with invalid printf format: "); + len = redisFormatCommand(&cmd,"key:%08p %b",(void*)1234,"foo",(size_t)3); + test_cond(len == -1); + + const char *argv[3]; + argv[0] = "SET"; + argv[1] = "foo\0xxx"; + argv[2] = "bar"; + size_t lens[3] = { 3, 7, 3 }; + int argc = 3; + + test("Format command by passing argc/argv without lengths: "); + len = redisFormatCommandArgv(&cmd,argc,argv,NULL); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command by passing argc/argv with lengths: "); + len = redisFormatCommandArgv(&cmd,argc,argv,lens); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(7+2)+4+(3+2)); + hi_free(cmd); + + hisds sds_cmd; + + sds_cmd = NULL; + test("Format command into hisds by passing argc/argv without lengths: "); + len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,NULL); + 
test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_sdsfree(sds_cmd); + + sds_cmd = NULL; + test("Format command into hisds by passing argc/argv with lengths: "); + len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,lens); + test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(7+2)+4+(3+2)); + hi_sdsfree(sds_cmd); +} + +static void test_append_formatted_commands(struct config config) { + redisContext *c; + redisReply *reply; + char *cmd; + int len; + + c = do_connect(config); + + test("Append format command: "); + + len = redisFormatCommand(&cmd, "SET foo bar"); + + test_cond(redisAppendFormattedCommand(c, cmd, len) == REDIS_OK); + + assert(redisGetReply(c, (void*)&reply) == REDIS_OK); + + hi_free(cmd); + freeReplyObject(reply); + + disconnect(c, 0); +} + +static void test_reply_reader(void) { + redisReader *reader; + void *reply, *root; + int ret; + int i; + + test("Error handling in reply parser: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"@foo\r\n",6); + ret = redisReaderGetReply(reader,NULL); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); + redisReaderFree(reader); + + /* when the reply already contains multiple items, they must be free'd + * on an error. valgrind will bark when this doesn't happen. */ + test("Memory cleanup in reply parser: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"*2\r\n",4); + redisReaderFeed(reader,(char*)"$5\r\nhello\r\n",11); + redisReaderFeed(reader,(char*)"@foo\r\n",6); + ret = redisReaderGetReply(reader,NULL); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); + redisReaderFree(reader); + + reader = redisReaderCreate(); + test("Can handle arbitrarily nested multi-bulks: "); + for (i = 0; i < 128; i++) { + redisReaderFeed(reader,(char*)"*1\r\n", 4); + } + redisReaderFeed(reader,(char*)"$6\r\nLOLWUT\r\n",12); + ret = redisReaderGetReply(reader,&reply); + root = reply; /* Keep track of the root reply */ + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_ARRAY && + ((redisReply*)reply)->elements == 1); + + test("Can parse arbitrarily nested multi-bulks correctly: "); + while(i--) { + assert(reply != NULL && ((redisReply*)reply)->type == REDIS_REPLY_ARRAY); + reply = ((redisReply*)reply)->element[0]; + } + test_cond(((redisReply*)reply)->type == REDIS_REPLY_STRING && + !memcmp(((redisReply*)reply)->str, "LOLWUT", 6)); + freeReplyObject(root); + redisReaderFree(reader); + + test("Correctly parses LLONG_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":9223372036854775807\r\n",22); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->integer == LLONG_MAX); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when > LLONG_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":9223372036854775808\r\n",22); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad integer value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Correctly parses LLONG_MIN: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":-9223372036854775808\r\n",23); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret 
== REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->integer == LLONG_MIN); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when < LLONG_MIN: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":-9223372036854775809\r\n",23); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad integer value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when array < -1: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "*-2\r\n+asdf\r\n",12); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when bulk < -1: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "$-2\r\nasdf\r\n",11); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bulk string length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can configure maximum multi-bulk elements: "); + reader = redisReaderCreate(); + reader->maxelements = 1024; + redisReaderFeed(reader, "*1025\r\n", 7); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr, "Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Multi-bulk never overflows regardless of maxelements: "); + size_t bad_mbulk_len = (SIZE_MAX / sizeof(void *)) + 3; + char bad_mbulk_reply[100]; + snprintf(bad_mbulk_reply, sizeof(bad_mbulk_reply), "*%llu\r\n+asdf\r\n", + (unsigned long long) bad_mbulk_len); + + reader = redisReaderCreate(); + reader->maxelements = 0; /* Don't rely on default limit */ + redisReaderFeed(reader, bad_mbulk_reply, strlen(bad_mbulk_reply)); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr, "Out of memory") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + +#if LLONG_MAX > SIZE_MAX + test("Set error when array > SIZE_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "*9223372036854775807\r\n+asdf\r\n",29); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when bulk > SIZE_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "$9223372036854775807\r\nasdf\r\n",28); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bulk string length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); +#endif + + test("Works with NULL functions for reply: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"+OK\r\n",5); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); + redisReaderFree(reader); + + test("Works when a single newline (\\r\\n) covers two calls to feed: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"+OK\r",4); + ret = redisReaderGetReply(reader,&reply); + assert(ret == REDIS_OK && reply == NULL); + redisReaderFeed(reader,(char*)"\n",1); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); + redisReaderFree(reader); 
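+    /* Once the reader has reported a protocol error it stays in the error
+     * state: subsequent calls keep returning REDIS_ERR with a NULL reply. */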
+ + test("Don't reset state after protocol error: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"x",1); + ret = redisReaderGetReply(reader,&reply); + assert(ret == REDIS_ERR); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && reply == NULL); + redisReaderFree(reader); + + /* Regression test for issue #45 on GitHub. */ + test("Don't do empty allocation for empty multi bulk: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"*0\r\n",4); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_ARRAY && + ((redisReply*)reply)->elements == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + /* RESP3 verbatim strings (GitHub issue #802) */ + test("Can parse RESP3 verbatim strings: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"=10\r\ntxt:LOLWUT\r\n",17); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_VERB && + !memcmp(((redisReply*)reply)->str,"LOLWUT", 6)); + freeReplyObject(reply); + redisReaderFree(reader); + + /* RESP3 push messages (Github issue #815) */ + test("Can parse RESP3 push messages: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)">2\r\n$6\r\nLOLWUT\r\n:42\r\n",21); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_PUSH && + ((redisReply*)reply)->elements == 2 && + ((redisReply*)reply)->element[0]->type == REDIS_REPLY_STRING && + !memcmp(((redisReply*)reply)->element[0]->str,"LOLWUT",6) && + ((redisReply*)reply)->element[1]->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->element[1]->integer == 42); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 doubles: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ",3.14159265358979323846\r\n",25); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_DOUBLE && + fabs(((redisReply*)reply)->dval - 3.14159265358979323846) < 0.00000001 && + ((redisReply*)reply)->len == 22 && + strcmp(((redisReply*)reply)->str, "3.14159265358979323846") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error on invalid RESP3 double: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ",3.14159\000265358979323846\r\n",26); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad double value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Correctly parses RESP3 double INFINITY: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ",inf\r\n",6); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_DOUBLE && + isinf(((redisReply*)reply)->dval) && + ((redisReply*)reply)->dval > 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when RESP3 double is NaN: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ",nan\r\n",6); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad double value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 nil: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "_\r\n",3); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + 
((redisReply*)reply)->type == REDIS_REPLY_NIL); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error on invalid RESP3 nil: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "_nil\r\n",6); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad nil value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 bool (true): "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "#t\r\n",4); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_BOOL && + ((redisReply*)reply)->integer); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 bool (false): "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "#f\r\n",4); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_BOOL && + !((redisReply*)reply)->integer); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error on invalid RESP3 bool: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "#foobar\r\n",9); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad bool value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 map: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "%2\r\n+first\r\n:123\r\n$6\r\nsecond\r\n#t\r\n",34); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_MAP && + ((redisReply*)reply)->elements == 4 && + ((redisReply*)reply)->element[0]->type == REDIS_REPLY_STATUS && + ((redisReply*)reply)->element[0]->len == 5 && + !strcmp(((redisReply*)reply)->element[0]->str,"first") && + ((redisReply*)reply)->element[1]->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->element[1]->integer == 123 && + ((redisReply*)reply)->element[2]->type == REDIS_REPLY_STRING && + ((redisReply*)reply)->element[2]->len == 6 && + !strcmp(((redisReply*)reply)->element[2]->str,"second") && + ((redisReply*)reply)->element[3]->type == REDIS_REPLY_BOOL && + ((redisReply*)reply)->element[3]->integer); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 set: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "~5\r\n+orange\r\n$5\r\napple\r\n#f\r\n:100\r\n:999\r\n",40); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_SET && + ((redisReply*)reply)->elements == 5 && + ((redisReply*)reply)->element[0]->type == REDIS_REPLY_STATUS && + ((redisReply*)reply)->element[0]->len == 6 && + !strcmp(((redisReply*)reply)->element[0]->str,"orange") && + ((redisReply*)reply)->element[1]->type == REDIS_REPLY_STRING && + ((redisReply*)reply)->element[1]->len == 5 && + !strcmp(((redisReply*)reply)->element[1]->str,"apple") && + ((redisReply*)reply)->element[2]->type == REDIS_REPLY_BOOL && + !((redisReply*)reply)->element[2]->integer && + ((redisReply*)reply)->element[3]->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->element[3]->integer == 100 && + ((redisReply*)reply)->element[4]->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->element[4]->integer == 999); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can parse RESP3 bignum: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,"(3492890328409238509324850943850943825024385\r\n",46); + ret = 
redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_BIGNUM && + ((redisReply*)reply)->len == 43 && + !strcmp(((redisReply*)reply)->str,"3492890328409238509324850943850943825024385")); + freeReplyObject(reply); + redisReaderFree(reader); +} + +static void test_free_null(void) { + void *redisCtx = NULL; + void *reply = NULL; + + test("Don't fail when redisFree is passed a NULL value: "); + redisFree(redisCtx); + test_cond(redisCtx == NULL); + + test("Don't fail when freeReplyObject is passed a NULL value: "); + freeReplyObject(reply); + test_cond(reply == NULL); +} + +static void *hi_malloc_fail(size_t size) { + (void)size; + return NULL; +} + +static void *hi_calloc_fail(size_t nmemb, size_t size) { + (void)nmemb; + (void)size; + return NULL; +} + +static void *hi_calloc_insecure(size_t nmemb, size_t size) { + (void)nmemb; + (void)size; + insecure_calloc_calls++; + return (void*)0xdeadc0de; +} + +static void *hi_realloc_fail(void *ptr, size_t size) { + (void)ptr; + (void)size; + return NULL; +} + +static void test_allocator_injection(void) { + void *ptr; + + hiredisAllocFuncs ha = { + .mallocFn = hi_malloc_fail, + .callocFn = hi_calloc_fail, + .reallocFn = hi_realloc_fail, + .strdupFn = strdup, + .freeFn = free, + }; + + // Override hiredis allocators + hiredisSetAllocators(&ha); + + test("redisContext uses injected allocators: "); + redisContext *c = redisConnect("localhost", 6379); + test_cond(c == NULL); + + test("redisReader uses injected allocators: "); + redisReader *reader = redisReaderCreate(); + test_cond(reader == NULL); + + /* Make sure hiredis itself protects against a non-overflow checking calloc */ + test("hiredis calloc wrapper protects against overflow: "); + ha.callocFn = hi_calloc_insecure; + hiredisSetAllocators(&ha); + ptr = hi_calloc((SIZE_MAX / sizeof(void*)) + 3, sizeof(void*)); + test_cond(ptr == NULL && insecure_calloc_calls == 0); + + // Return allocators to default + hiredisResetAllocators(); +} + +#define HIREDIS_BAD_DOMAIN "idontexist-noreally.com" +static void test_blocking_connection_errors(void) { + redisContext *c; + struct addrinfo hints = {.ai_family = AF_INET}; + struct addrinfo *ai_tmp = NULL; + + int rv = getaddrinfo(HIREDIS_BAD_DOMAIN, "6379", &hints, &ai_tmp); + if (rv != 0) { + // Address does *not* exist + test("Returns error when host cannot be resolved: "); + // First see if this domain name *actually* resolves to NXDOMAIN + c = redisConnect(HIREDIS_BAD_DOMAIN, 6379); + test_cond( + c->err == REDIS_ERR_OTHER && + (strcmp(c->errstr, "Name or service not known") == 0 || + strcmp(c->errstr, "Can't resolve: " HIREDIS_BAD_DOMAIN) == 0 || + strcmp(c->errstr, "Name does not resolve") == 0 || + strcmp(c->errstr, "nodename nor servname provided, or not known") == 0 || + strcmp(c->errstr, "No address associated with hostname") == 0 || + strcmp(c->errstr, "Temporary failure in name resolution") == 0 || + strcmp(c->errstr, "hostname nor servname provided, or not known") == 0 || + strcmp(c->errstr, "no address associated with name") == 0 || + strcmp(c->errstr, "No such host is known. ") == 0)); + redisFree(c); + } else { + printf("Skipping NXDOMAIN test. 
Found evil ISP!\n"); + freeaddrinfo(ai_tmp); + } + +#ifndef _WIN32 + test("Returns error when the port is not open: "); + c = redisConnect((char*)"localhost", 1); + test_cond(c->err == REDIS_ERR_IO && + strcmp(c->errstr,"Connection refused") == 0); + redisFree(c); + + test("Returns error when the unix_sock socket path doesn't accept connections: "); + c = redisConnectUnix((char*)"/tmp/idontexist.sock"); + test_cond(c->err == REDIS_ERR_IO); /* Don't care about the message... */ + redisFree(c); +#endif +} + +/* Test push handler */ +void push_handler(void *privdata, void *r) { + struct pushCounters *pcounts = privdata; + redisReply *reply = r, *payload; + + assert(reply && reply->type == REDIS_REPLY_PUSH && reply->elements == 2); + + payload = reply->element[1]; + if (payload->type == REDIS_REPLY_ARRAY) { + payload = payload->element[0]; + } + + if (payload->type == REDIS_REPLY_STRING) { + pcounts->str++; + } else if (payload->type == REDIS_REPLY_NIL) { + pcounts->nil++; + } + + freeReplyObject(reply); +} + +/* Dummy function just to test setting a callback with redisOptions */ +void push_handler_async(redisAsyncContext *ac, void *reply) { + (void)ac; + (void)reply; +} + +static void test_resp3_push_handler(redisContext *c) { + struct pushCounters pc = {0}; + redisPushFn *old = NULL; + redisReply *reply; + void *privdata; + + /* Switch to RESP3 and turn on client tracking */ + send_hello(c, 3); + send_client_tracking(c, "ON"); + privdata = c->privdata; + c->privdata = &pc; + + reply = redisCommand(c, "GET key:0"); + assert(reply != NULL); + freeReplyObject(reply); + + test("RESP3 PUSH messages are handled out of band by default: "); + reply = redisCommand(c, "SET key:0 val:0"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + + assert((reply = redisCommand(c, "GET key:0")) != NULL); + freeReplyObject(reply); + + old = redisSetPushCallback(c, push_handler); + test("We can set a custom RESP3 PUSH handler: "); + reply = redisCommand(c, "SET key:0 val:0"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && pc.str == 1); + freeReplyObject(reply); + + test("We properly handle a NIL invalidation payload: "); + reply = redisCommand(c, "FLUSHDB"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && pc.nil == 1); + freeReplyObject(reply); + + /* Unset the push callback and generate an invalidate message making + * sure it is not handled out of band. 
*/ + test("With no handler, PUSH replies come in-band: "); + redisSetPushCallback(c, NULL); + assert((reply = redisCommand(c, "GET key:0")) != NULL); + freeReplyObject(reply); + assert((reply = redisCommand(c, "SET key:0 invalid")) != NULL); + test_cond(reply->type == REDIS_REPLY_PUSH); + freeReplyObject(reply); + + test("With no PUSH handler, no replies are lost: "); + assert(redisGetReply(c, (void**)&reply) == REDIS_OK); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + + /* Return to the originally set PUSH handler */ + assert(old != NULL); + redisSetPushCallback(c, old); + + /* Switch back to RESP2 and disable tracking */ + c->privdata = privdata; + send_client_tracking(c, "OFF"); + send_hello(c, 2); +} + +redisOptions get_redis_tcp_options(struct config config) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, config.tcp.host, config.tcp.port); + return options; +} + +static void test_resp3_push_options(struct config config) { + redisAsyncContext *ac; + redisContext *c; + redisOptions options; + + test("We set a default RESP3 handler for redisContext: "); + options = get_redis_tcp_options(config); + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb != NULL); + redisFree(c); + + test("We don't set a default RESP3 push handler for redisAsyncContext: "); + options = get_redis_tcp_options(config); + assert((ac = redisAsyncConnectWithOptions(&options)) != NULL); + test_cond(ac->c.push_cb == NULL); + redisAsyncFree(ac); + + test("Our REDIS_OPT_NO_PUSH_AUTOFREE flag works: "); + options = get_redis_tcp_options(config); + options.options |= REDIS_OPT_NO_PUSH_AUTOFREE; + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb == NULL); + redisFree(c); + + test("We can use redisOptions to set a custom PUSH handler for redisContext: "); + options = get_redis_tcp_options(config); + options.push_cb = push_handler; + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb == push_handler); + redisFree(c); + + test("We can use redisOptions to set a custom PUSH handler for redisAsyncContext: "); + options = get_redis_tcp_options(config); + options.async_push_cb = push_handler_async; + assert((ac = redisAsyncConnectWithOptions(&options)) != NULL); + test_cond(ac->push_cb == push_handler_async); + redisAsyncFree(ac); +} + +void free_privdata(void *privdata) { + struct privdata *data = privdata; + data->dtor_counter++; +} + +static void test_privdata_hooks(struct config config) { + struct privdata data = {0}; + redisOptions options; + redisContext *c; + + test("We can use redisOptions to set privdata: "); + options = get_redis_tcp_options(config); + REDIS_OPTIONS_SET_PRIVDATA(&options, &data, free_privdata); + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->privdata == &data); + + test("Our privdata destructor fires when we free the context: "); + redisFree(c); + test_cond(data.dtor_counter == 1); +} + +static void test_blocking_connection(struct config config) { + redisContext *c; + redisReply *reply; + int major; + + c = do_connect(config); + + test("Is able to deliver commands: "); + reply = redisCommand(c,"PING"); + test_cond(reply->type == REDIS_REPLY_STATUS && + strcasecmp(reply->str,"pong") == 0) + freeReplyObject(reply); + + test("Is a able to send commands verbatim: "); + reply = redisCommand(c,"SET foo bar"); + test_cond (reply->type == REDIS_REPLY_STATUS && + strcasecmp(reply->str,"ok") == 0) + freeReplyObject(reply); + + test("%%s 
String interpolation works: "); + reply = redisCommand(c,"SET %s %s","foo","hello world"); + freeReplyObject(reply); + reply = redisCommand(c,"GET foo"); + test_cond(reply->type == REDIS_REPLY_STRING && + strcmp(reply->str,"hello world") == 0); + freeReplyObject(reply); + + test("%%b String interpolation works: "); + reply = redisCommand(c,"SET %b %b","foo",(size_t)3,"hello\x00world",(size_t)11); + freeReplyObject(reply); + reply = redisCommand(c,"GET foo"); + test_cond(reply->type == REDIS_REPLY_STRING && + memcmp(reply->str,"hello\x00world",11) == 0) + + test("Binary reply length is correct: "); + test_cond(reply->len == 11) + freeReplyObject(reply); + + test("Can parse nil replies: "); + reply = redisCommand(c,"GET nokey"); + test_cond(reply->type == REDIS_REPLY_NIL) + freeReplyObject(reply); + + /* test 7 */ + test("Can parse integer replies: "); + reply = redisCommand(c,"INCR mycounter"); + test_cond(reply->type == REDIS_REPLY_INTEGER && reply->integer == 1) + freeReplyObject(reply); + + test("Can parse multi bulk replies: "); + freeReplyObject(redisCommand(c,"LPUSH mylist foo")); + freeReplyObject(redisCommand(c,"LPUSH mylist bar")); + reply = redisCommand(c,"LRANGE mylist 0 -1"); + test_cond(reply->type == REDIS_REPLY_ARRAY && + reply->elements == 2 && + !memcmp(reply->element[0]->str,"bar",3) && + !memcmp(reply->element[1]->str,"foo",3)) + freeReplyObject(reply); + + /* m/e with multi bulk reply *before* other reply. + * specifically test ordering of reply items to parse. */ + test("Can handle nested multi bulk replies: "); + freeReplyObject(redisCommand(c,"MULTI")); + freeReplyObject(redisCommand(c,"LRANGE mylist 0 -1")); + freeReplyObject(redisCommand(c,"PING")); + reply = (redisCommand(c,"EXEC")); + test_cond(reply->type == REDIS_REPLY_ARRAY && + reply->elements == 2 && + reply->element[0]->type == REDIS_REPLY_ARRAY && + reply->element[0]->elements == 2 && + !memcmp(reply->element[0]->element[0]->str,"bar",3) && + !memcmp(reply->element[0]->element[1]->str,"foo",3) && + reply->element[1]->type == REDIS_REPLY_STATUS && + strcasecmp(reply->element[1]->str,"pong") == 0); + freeReplyObject(reply); + + /* Make sure passing NULL to redisGetReply is safe */ + test("Can pass NULL to redisGetReply: "); + assert(redisAppendCommand(c, "PING") == REDIS_OK); + test_cond(redisGetReply(c, NULL) == REDIS_OK); + + get_redis_version(c, &major, NULL); + if (major >= 6) test_resp3_push_handler(c); + test_resp3_push_options(config); + + test_privdata_hooks(config); + + disconnect(c, 0); +} + +/* Send DEBUG SLEEP 0 to detect if we have this command */ +static int detect_debug_sleep(redisContext *c) { + int detected; + redisReply *reply = redisCommand(c, "DEBUG SLEEP 0\r\n"); + + if (reply == NULL || c->err) { + const char *cause = c->err ? 
c->errstr : "(none)"; + fprintf(stderr, "Error testing for DEBUG SLEEP (Redis error: %s), exiting\n", cause); + exit(-1); + } + + detected = reply->type == REDIS_REPLY_STATUS; + freeReplyObject(reply); + + return detected; +} + +static void test_blocking_connection_timeouts(struct config config) { + redisContext *c; + redisReply *reply; + ssize_t s; + const char *sleep_cmd = "DEBUG SLEEP 3\r\n"; + struct timeval tv; + + c = do_connect(config); + test("Successfully completes a command when the timeout is not exceeded: "); + reply = redisCommand(c,"SET foo fast"); + freeReplyObject(reply); + tv.tv_sec = 0; + tv.tv_usec = 10000; + redisSetTimeout(c, tv); + reply = redisCommand(c, "GET foo"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STRING && memcmp(reply->str, "fast", 4) == 0); + freeReplyObject(reply); + disconnect(c, 0); + + c = do_connect(config); + test("Does not return a reply when the command times out: "); + if (detect_debug_sleep(c)) { + redisAppendFormattedCommand(c, sleep_cmd, strlen(sleep_cmd)); + s = c->funcs->write(c); + tv.tv_sec = 0; + tv.tv_usec = 10000; + redisSetTimeout(c, tv); + reply = redisCommand(c, "GET foo"); +#ifndef _WIN32 + test_cond(s > 0 && reply == NULL && c->err == REDIS_ERR_IO && + strcmp(c->errstr, "Resource temporarily unavailable") == 0); +#else + test_cond(s > 0 && reply == NULL && c->err == REDIS_ERR_TIMEOUT && + strcmp(c->errstr, "recv timeout") == 0); +#endif + freeReplyObject(reply); + } else { + test_skipped(); + } + + test("Reconnect properly reconnects after a timeout: "); + do_reconnect(c, config); + reply = redisCommand(c, "PING"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0); + freeReplyObject(reply); + + test("Reconnect properly uses owned parameters: "); + config.tcp.host = "foo"; + config.unix_sock.path = "foo"; + do_reconnect(c, config); + reply = redisCommand(c, "PING"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0); + freeReplyObject(reply); + + disconnect(c, 0); +} + +static void test_blocking_io_errors(struct config config) { + redisContext *c; + redisReply *reply; + void *_reply; + int major, minor; + + /* Connect to target given by config. */ + c = do_connect(config); + get_redis_version(c, &major, &minor); + + test("Returns I/O error when the connection is lost: "); + reply = redisCommand(c,"QUIT"); + if (major > 2 || (major == 2 && minor > 0)) { + /* > 2.0 returns OK on QUIT and read() should be issued once more + * to know the descriptor is at EOF. */ + test_cond(strcasecmp(reply->str,"OK") == 0 && + redisGetReply(c,&_reply) == REDIS_ERR); + freeReplyObject(reply); + } else { + test_cond(reply == NULL); + } + +#ifndef _WIN32 + /* On 2.0, QUIT will cause the connection to be closed immediately and + * the read(2) for the reply on QUIT will set the error to EOF. + * On >2.0, QUIT will return with OK and another read(2) needed to be + * issued to find out the socket was closed by the server. In both + * conditions, the error will be set to EOF. 
*/ + assert(c->err == REDIS_ERR_EOF && + strcmp(c->errstr,"Server closed the connection") == 0); +#endif + redisFree(c); + + c = do_connect(config); + test("Returns I/O error on socket timeout: "); + struct timeval tv = { 0, 1000 }; + assert(redisSetTimeout(c,tv) == REDIS_OK); + int respcode = redisGetReply(c,&_reply); +#ifndef _WIN32 + test_cond(respcode == REDIS_ERR && c->err == REDIS_ERR_IO && errno == EAGAIN); +#else + test_cond(respcode == REDIS_ERR && c->err == REDIS_ERR_TIMEOUT); +#endif + redisFree(c); +} + +static void test_invalid_timeout_errors(struct config config) { + redisContext *c; + + test("Set error when an invalid timeout usec value is given to redisConnectWithTimeout: "); + + config.tcp.timeout.tv_sec = 0; + config.tcp.timeout.tv_usec = 10000001; + + c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); + + test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0); + redisFree(c); + + test("Set error when an invalid timeout sec value is given to redisConnectWithTimeout: "); + + config.tcp.timeout.tv_sec = (((LONG_MAX) - 999) / 1000) + 1; + config.tcp.timeout.tv_usec = 0; + + c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); + + test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0); + redisFree(c); +} + +/* Wrap malloc to abort on failure so OOM checks don't make the test logic + * harder to follow. */ +void *hi_malloc_safe(size_t size) { + void *ptr = hi_malloc(size); + if (ptr == NULL) { + fprintf(stderr, "Error: Out of memory\n"); + exit(-1); + } + + return ptr; +} + +static void test_throughput(struct config config) { + redisContext *c = do_connect(config); + redisReply **replies; + int i, num; + long long t1, t2; + + test("Throughput:\n"); + for (i = 0; i < 500; i++) + freeReplyObject(redisCommand(c,"LPUSH mylist foo")); + + num = 1000; + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c,"PING"); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx PING: %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c,"LRANGE mylist 0 499"); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); + assert(replies[i] != NULL && replies[i]->elements == 500); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx LRANGE with 500 elements: %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c, "INCRBY incrkey %d", 1000000); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_INTEGER); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx INCRBY: %.3fs)\n", num, (t2-t1)/1000000.0); + + num = 10000; + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"PING"); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + 
printf("\t(%dx PING (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"LRANGE mylist 0 499"); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); + assert(replies[i] != NULL && replies[i]->elements == 500); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx LRANGE with 500 elements (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"INCRBY incrkey %d", 1000000); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_INTEGER); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx INCRBY (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + disconnect(c, 0); +} + +// static long __test_callback_flags = 0; +// static void __test_callback(redisContext *c, void *privdata) { +// ((void)c); +// /* Shift to detect execution order */ +// __test_callback_flags <<= 8; +// __test_callback_flags |= (long)privdata; +// } +// +// static void __test_reply_callback(redisContext *c, redisReply *reply, void *privdata) { +// ((void)c); +// /* Shift to detect execution order */ +// __test_callback_flags <<= 8; +// __test_callback_flags |= (long)privdata; +// if (reply) freeReplyObject(reply); +// } +// +// static redisContext *__connect_nonblock() { +// /* Reset callback flags */ +// __test_callback_flags = 0; +// return redisConnectNonBlock("127.0.0.1", port, NULL); +// } +// +// static void test_nonblocking_connection() { +// redisContext *c; +// int wdone = 0; +// +// test("Calls command callback when command is issued: "); +// c = __connect_nonblock(); +// redisSetCommandCallback(c,__test_callback,(void*)1); +// redisCommand(c,"PING"); +// test_cond(__test_callback_flags == 1); +// redisFree(c); +// +// test("Calls disconnect callback on redisDisconnect: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)2); +// redisDisconnect(c); +// test_cond(__test_callback_flags == 2); +// redisFree(c); +// +// test("Calls disconnect callback and free callback on redisFree: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)2); +// redisSetFreeCallback(c,__test_callback,(void*)4); +// redisFree(c); +// test_cond(__test_callback_flags == ((2 << 8) | 4)); +// +// test("redisBufferWrite against empty write buffer: "); +// c = __connect_nonblock(); +// test_cond(redisBufferWrite(c,&wdone) == REDIS_OK && wdone == 1); +// redisFree(c); +// +// test("redisBufferWrite against not yet connected fd: "); +// c = __connect_nonblock(); +// redisCommand(c,"PING"); +// test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && +// strncmp(c->error,"write:",6) == 0); +// redisFree(c); +// +// test("redisBufferWrite against closed fd: "); +// c = __connect_nonblock(); +// redisCommand(c,"PING"); +// redisDisconnect(c); +// test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && +// strncmp(c->error,"write:",6) == 0); +// redisFree(c); +// +// test("Process callbacks in the right sequence: "); +// c = __connect_nonblock(); +// redisCommandWithCallback(c,__test_reply_callback,(void*)1,"PING"); +// 
redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); +// redisCommandWithCallback(c,__test_reply_callback,(void*)3,"PING"); +// +// /* Write output buffer */ +// wdone = 0; +// while(!wdone) { +// usleep(500); +// redisBufferWrite(c,&wdone); +// } +// +// /* Read until at least one callback is executed (the 3 replies will +// * arrive in a single packet, causing all callbacks to be executed in +// * a single pass). */ +// while(__test_callback_flags == 0) { +// assert(redisBufferRead(c) == REDIS_OK); +// redisProcessCallbacks(c); +// } +// test_cond(__test_callback_flags == 0x010203); +// redisFree(c); +// +// test("redisDisconnect executes pending callbacks with NULL reply: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)1); +// redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); +// redisDisconnect(c); +// test_cond(__test_callback_flags == 0x0201); +// redisFree(c); +// } + +#ifdef HIREDIS_TEST_ASYNC +struct event_base *base; + +typedef struct TestState { + redisOptions *options; + int checkpoint; + int resp3; + int disconnect; +} TestState; + +/* Helper to disconnect and stop event loop */ +void async_disconnect(redisAsyncContext *ac) { + redisAsyncDisconnect(ac); + event_base_loopbreak(base); +} + +/* Testcase timeout, will trigger a failure */ +void timeout_cb(int fd, short event, void *arg) { + (void) fd; (void) event; (void) arg; + printf("Timeout in async testing!\n"); + exit(1); +} + +/* Unexpected call, will trigger a failure */ +void unexpected_cb(redisAsyncContext *ac, void *r, void *privdata) { + (void) ac; (void) r; + printf("Unexpected call: %s\n",(char*)privdata); + exit(1); +} + +/* Helper function to publish a message via own client. */ +void publish_msg(redisOptions *options, const char* channel, const char* msg) { + redisContext *c = redisConnectWithOptions(options); + assert(c != NULL); + redisReply *reply = redisCommand(c,"PUBLISH %s %s",channel,msg); + assert(reply->type == REDIS_REPLY_INTEGER && reply->integer == 1); + freeReplyObject(reply); + disconnect(c, 0); +} + +/* Expect a reply of type INTEGER */ +void integer_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + assert(reply != NULL && reply->type == REDIS_REPLY_INTEGER); + state->checkpoint++; + if (state->disconnect) async_disconnect(ac); +} + +/* Subscribe callback for test_pubsub_handling and test_pubsub_handling_resp3: + * - a published message triggers an unsubscribe + * - a command is sent before the unsubscribe response is received. */ +void subscribe_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + + assert(reply != NULL && + reply->type == (state->resp3 ? REDIS_REPLY_PUSH : REDIS_REPLY_ARRAY) && + reply->elements == 3); + + if (strcmp(reply->element[0]->str,"subscribe") == 0) { + assert(strcmp(reply->element[1]->str,"mychannel") == 0 && + reply->element[2]->str == NULL); + publish_msg(state->options,"mychannel","Hello!"); + } else if (strcmp(reply->element[0]->str,"message") == 0) { + assert(strcmp(reply->element[1]->str,"mychannel") == 0 && + strcmp(reply->element[2]->str,"Hello!") == 0); + state->checkpoint++; + + /* Unsubscribe after receiving the published message. 
Send unsubscribe + * which should call the callback registered during subscribe */ + redisAsyncCommand(ac,unexpected_cb, + (void*)"unsubscribe should call subscribe_cb()", + "unsubscribe"); + /* Send a regular command after unsubscribing, then disconnect */ + state->disconnect = 1; + redisAsyncCommand(ac,integer_cb,state,"LPUSH mylist foo"); + + } else if (strcmp(reply->element[0]->str,"unsubscribe") == 0) { + assert(strcmp(reply->element[1]->str,"mychannel") == 0 && + reply->element[2]->str == NULL); + } else { + printf("Unexpected pubsub command: %s\n", reply->element[0]->str); + exit(1); + } +} + +/* Expect a reply of type ARRAY */ +void array_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + assert(reply != NULL && reply->type == REDIS_REPLY_ARRAY); + state->checkpoint++; + if (state->disconnect) async_disconnect(ac); +} + +/* Expect a NULL reply */ +void null_cb(redisAsyncContext *ac, void *r, void *privdata) { + (void) ac; + assert(r == NULL); + TestState *state = privdata; + state->checkpoint++; +} + +static void test_pubsub_handling(struct config config) { + test("Subscribe, handle published message and unsubscribe: "); + /* Setup event dispatcher with a testcase timeout */ + base = event_base_new(); + struct event *timeout = evtimer_new(base, timeout_cb, NULL); + assert(timeout != NULL); + + evtimer_assign(timeout,base,timeout_cb,NULL); + struct timeval timeout_tv = {.tv_sec = 10}; + evtimer_add(timeout, &timeout_tv); + + /* Connect */ + redisOptions options = get_redis_tcp_options(config); + redisAsyncContext *ac = redisAsyncConnectWithOptions(&options); + assert(ac != NULL && ac->err == 0); + redisLibeventAttach(ac,base); + + /* Start subscribe */ + TestState state = {.options = &options}; + redisAsyncCommand(ac,subscribe_cb,&state,"subscribe mychannel"); + + /* Make sure non-subscribe commands are handled */ + redisAsyncCommand(ac,array_cb,&state,"PING"); + + /* Start event dispatching loop */ + test_cond(event_base_dispatch(base) == 0); + event_free(timeout); + event_base_free(base); + + /* Verify test checkpoints */ + assert(state.checkpoint == 3); +} + +/* Unexpected push message, will trigger a failure */ +void unexpected_push_cb(redisAsyncContext *ac, void *r) { + (void) ac; (void) r; + printf("Unexpected call to the PUSH callback!\n"); + exit(1); +} + +static void test_pubsub_handling_resp3(struct config config) { + test("Subscribe, handle published message and unsubscribe using RESP3: "); + /* Setup event dispatcher with a testcase timeout */ + base = event_base_new(); + struct event *timeout = evtimer_new(base, timeout_cb, NULL); + assert(timeout != NULL); + + evtimer_assign(timeout,base,timeout_cb,NULL); + struct timeval timeout_tv = {.tv_sec = 10}; + evtimer_add(timeout, &timeout_tv); + + /* Connect */ + redisOptions options = get_redis_tcp_options(config); + redisAsyncContext *ac = redisAsyncConnectWithOptions(&options); + assert(ac != NULL && ac->err == 0); + redisLibeventAttach(ac,base); + + /* Not expecting any push messages in this test */ + redisAsyncSetPushCallback(ac, unexpected_push_cb); + + /* Switch protocol */ + redisAsyncCommand(ac,NULL,NULL,"HELLO 3"); + + /* Start subscribe */ + TestState state = {.options = &options, .resp3 = 1}; + redisAsyncCommand(ac,subscribe_cb,&state,"subscribe mychannel"); + + /* Make sure non-subscribe commands are handled in RESP3 */ + redisAsyncCommand(ac,integer_cb,&state,"LPUSH mylist foo"); + redisAsyncCommand(ac,integer_cb,&state,"LPUSH mylist foo"); + 
redisAsyncCommand(ac,integer_cb,&state,"LPUSH mylist foo"); + /* Handle an array with 3 elements as a non-subscribe command */ + redisAsyncCommand(ac,array_cb,&state,"LRANGE mylist 0 2"); + + /* Start event dispatching loop */ + test_cond(event_base_dispatch(base) == 0); + event_free(timeout); + event_base_free(base); + + /* Verify test checkpoints */ + assert(state.checkpoint == 6); +} + +/* Subscribe callback for test_command_timeout_during_pubsub: + * - a subscribe response triggers a published message + * - the published message triggers a command that times out + * - the command timeout triggers a disconnect */ +void subscribe_with_timeout_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + + /* The non-clean disconnect should trigger the + * subscription callback with a NULL reply. */ + if (reply == NULL) { + state->checkpoint++; + event_base_loopbreak(base); + return; + } + + assert(reply->type == (state->resp3 ? REDIS_REPLY_PUSH : REDIS_REPLY_ARRAY) && + reply->elements == 3); + + if (strcmp(reply->element[0]->str,"subscribe") == 0) { + assert(strcmp(reply->element[1]->str,"mychannel") == 0 && + reply->element[2]->str == NULL); + publish_msg(state->options,"mychannel","Hello!"); + state->checkpoint++; + } else if (strcmp(reply->element[0]->str,"message") == 0) { + assert(strcmp(reply->element[1]->str,"mychannel") == 0 && + strcmp(reply->element[2]->str,"Hello!") == 0); + state->checkpoint++; + + /* Send a command that will trigger a timeout */ + redisAsyncCommand(ac,null_cb,state,"DEBUG SLEEP 3"); + redisAsyncCommand(ac,null_cb,state,"LPUSH mylist foo"); + } else { + printf("Unexpected pubsub command: %s\n", reply->element[0]->str); + exit(1); + } +} + +static void test_command_timeout_during_pubsub(struct config config) { + test("Command timeout during Pub/Sub: "); + /* Setup event dispatcher with a testcase timeout */ + base = event_base_new(); + struct event *timeout = evtimer_new(base,timeout_cb,NULL); + assert(timeout != NULL); + + evtimer_assign(timeout,base,timeout_cb,NULL); + struct timeval timeout_tv = {.tv_sec = 10}; + evtimer_add(timeout,&timeout_tv); + + /* Connect */ + redisOptions options = get_redis_tcp_options(config); + redisAsyncContext *ac = redisAsyncConnectWithOptions(&options); + assert(ac != NULL && ac->err == 0); + redisLibeventAttach(ac,base); + + /* Configure a command timeout */ + struct timeval command_timeout = {.tv_sec = 2}; + redisAsyncSetTimeout(ac,command_timeout); + + /* Not expecting any push messages in this test */ + redisAsyncSetPushCallback(ac,unexpected_push_cb); + + /* Switch protocol */ + redisAsyncCommand(ac,NULL,NULL,"HELLO 3"); + + /* Start subscribe */ + TestState state = {.options = &options, .resp3 = 1}; + redisAsyncCommand(ac,subscribe_with_timeout_cb,&state,"subscribe mychannel"); + + /* Start event dispatching loop */ + assert(event_base_dispatch(base) == 0); + event_free(timeout); + event_base_free(base); + + /* Verify test checkpoints */ + test_cond(state.checkpoint == 5); +} + +/* Subscribe callback for test_pubsub_multiple_channels */ +void subscribe_channel_a_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + + assert(reply != NULL && reply->type == REDIS_REPLY_ARRAY && + reply->elements == 3); + + if (strcmp(reply->element[0]->str,"subscribe") == 0) { + assert(strcmp(reply->element[1]->str,"A") == 0); + publish_msg(state->options,"A","Hello!"); + state->checkpoint++; + } else if 
(strcmp(reply->element[0]->str,"message") == 0) { + assert(strcmp(reply->element[1]->str,"A") == 0 && + strcmp(reply->element[2]->str,"Hello!") == 0); + state->checkpoint++; + + /* Unsubscribe to channels, including a channel X which we don't subscribe to */ + redisAsyncCommand(ac,unexpected_cb, + (void*)"unsubscribe should not call unexpected_cb()", + "unsubscribe B X A"); + /* Send a regular command after unsubscribing, then disconnect */ + state->disconnect = 1; + redisAsyncCommand(ac,integer_cb,state,"LPUSH mylist foo"); + } else if (strcmp(reply->element[0]->str,"unsubscribe") == 0) { + assert(strcmp(reply->element[1]->str,"A") == 0); + state->checkpoint++; + } else { + printf("Unexpected pubsub command: %s\n", reply->element[0]->str); + exit(1); + } +} + +/* Subscribe callback for test_pubsub_multiple_channels */ +void subscribe_channel_b_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + + assert(reply != NULL && reply->type == REDIS_REPLY_ARRAY && + reply->elements == 3); + + if (strcmp(reply->element[0]->str,"subscribe") == 0) { + assert(strcmp(reply->element[1]->str,"B") == 0); + state->checkpoint++; + } else if (strcmp(reply->element[0]->str,"unsubscribe") == 0) { + assert(strcmp(reply->element[1]->str,"B") == 0); + state->checkpoint++; + } else { + printf("Unexpected pubsub command: %s\n", reply->element[0]->str); + exit(1); + } +} + +/* Test handling of multiple channels + * - subscribe to channel A and B + * - a published message on A triggers an unsubscribe of channel B, X and A + * where channel X is not subscribed to. + * - a command sent after unsubscribe triggers a disconnect */ +static void test_pubsub_multiple_channels(struct config config) { + test("Subscribe to multiple channels: "); + /* Setup event dispatcher with a testcase timeout */ + base = event_base_new(); + struct event *timeout = evtimer_new(base,timeout_cb,NULL); + assert(timeout != NULL); + + evtimer_assign(timeout,base,timeout_cb,NULL); + struct timeval timeout_tv = {.tv_sec = 10}; + evtimer_add(timeout,&timeout_tv); + + /* Connect */ + redisOptions options = get_redis_tcp_options(config); + redisAsyncContext *ac = redisAsyncConnectWithOptions(&options); + assert(ac != NULL && ac->err == 0); + redisLibeventAttach(ac,base); + + /* Not expecting any push messages in this test */ + redisAsyncSetPushCallback(ac,unexpected_push_cb); + + /* Start subscribing to two channels */ + TestState state = {.options = &options}; + redisAsyncCommand(ac,subscribe_channel_a_cb,&state,"subscribe A"); + redisAsyncCommand(ac,subscribe_channel_b_cb,&state,"subscribe B"); + + /* Start event dispatching loop */ + assert(event_base_dispatch(base) == 0); + event_free(timeout); + event_base_free(base); + + /* Verify test checkpoints */ + test_cond(state.checkpoint == 6); +} + +/* Command callback for test_monitor() */ +void monitor_cb(redisAsyncContext *ac, void *r, void *privdata) { + redisReply *reply = r; + TestState *state = privdata; + + /* NULL reply is received when BYE triggers a disconnect. 
*/ + if (reply == NULL) { + event_base_loopbreak(base); + return; + } + + assert(reply != NULL && reply->type == REDIS_REPLY_STATUS); + state->checkpoint++; + + if (state->checkpoint == 1) { + /* Response from MONITOR */ + redisContext *c = redisConnectWithOptions(state->options); + assert(c != NULL); + redisReply *reply = redisCommand(c,"SET first 1"); + assert(reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + redisFree(c); + } else if (state->checkpoint == 2) { + /* Response for monitored command 'SET first 1' */ + assert(strstr(reply->str,"first") != NULL); + redisContext *c = redisConnectWithOptions(state->options); + assert(c != NULL); + redisReply *reply = redisCommand(c,"SET second 2"); + assert(reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + redisFree(c); + } else if (state->checkpoint == 3) { + /* Response for monitored command 'SET second 2' */ + assert(strstr(reply->str,"second") != NULL); + /* Send QUIT to disconnect */ + redisAsyncCommand(ac,NULL,NULL,"QUIT"); + } +} + +/* Test handling of the monitor command + * - sends MONITOR to enable monitoring. + * - sends SET commands via separate clients to be monitored. + * - sends QUIT to stop monitoring and disconnect. */ +static void test_monitor(struct config config) { + test("Enable monitoring: "); + /* Setup event dispatcher with a testcase timeout */ + base = event_base_new(); + struct event *timeout = evtimer_new(base, timeout_cb, NULL); + assert(timeout != NULL); + + evtimer_assign(timeout,base,timeout_cb,NULL); + struct timeval timeout_tv = {.tv_sec = 10}; + evtimer_add(timeout, &timeout_tv); + + /* Connect */ + redisOptions options = get_redis_tcp_options(config); + redisAsyncContext *ac = redisAsyncConnectWithOptions(&options); + assert(ac != NULL && ac->err == 0); + redisLibeventAttach(ac,base); + + /* Not expecting any push messages in this test */ + redisAsyncSetPushCallback(ac,unexpected_push_cb); + + /* Start monitor */ + TestState state = {.options = &options}; + redisAsyncCommand(ac,monitor_cb,&state,"monitor"); + + /* Start event dispatching loop */ + test_cond(event_base_dispatch(base) == 0); + event_free(timeout); + event_base_free(base); + + /* Verify test checkpoints */ + assert(state.checkpoint == 3); +} +#endif /* HIREDIS_TEST_ASYNC */ + +int main(int argc, char **argv) { + struct config cfg = { + .tcp = { + .host = "127.0.0.1", + .port = 6379 + }, + .unix_sock = { + .path = "/tmp/redis.sock" + } + }; + int throughput = 1; + int test_inherit_fd = 1; + int skips_as_fails = 0; + int test_unix_socket; + + /* Parse command line options. 
*/ + argv++; argc--; + while (argc) { + if (argc >= 2 && !strcmp(argv[0],"-h")) { + argv++; argc--; + cfg.tcp.host = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"-p")) { + argv++; argc--; + cfg.tcp.port = atoi(argv[0]); + } else if (argc >= 2 && !strcmp(argv[0],"-s")) { + argv++; argc--; + cfg.unix_sock.path = argv[0]; + } else if (argc >= 1 && !strcmp(argv[0],"--skip-throughput")) { + throughput = 0; + } else if (argc >= 1 && !strcmp(argv[0],"--skip-inherit-fd")) { + test_inherit_fd = 0; + } else if (argc >= 1 && !strcmp(argv[0],"--skips-as-fails")) { + skips_as_fails = 1; +#ifdef HIREDIS_TEST_SSL + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-port")) { + argv++; argc--; + cfg.ssl.port = atoi(argv[0]); + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-host")) { + argv++; argc--; + cfg.ssl.host = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-ca-cert")) { + argv++; argc--; + cfg.ssl.ca_cert = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-cert")) { + argv++; argc--; + cfg.ssl.cert = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-key")) { + argv++; argc--; + cfg.ssl.key = argv[0]; +#endif + } else { + fprintf(stderr, "Invalid argument: %s\n", argv[0]); + exit(1); + } + argv++; argc--; + } + +#ifndef _WIN32 + /* Ignore broken pipe signal (for I/O error tests). */ + signal(SIGPIPE, SIG_IGN); + + test_unix_socket = access(cfg.unix_sock.path, F_OK) == 0; + +#else + /* Unix sockets don't exist in Windows */ + test_unix_socket = 0; +#endif + + test_allocator_injection(); + + test_format_commands(); + test_reply_reader(); + test_blocking_connection_errors(); + test_free_null(); + + printf("\nTesting against TCP connection (%s:%d):\n", cfg.tcp.host, cfg.tcp.port); + cfg.type = CONN_TCP; + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + test_invalid_timeout_errors(cfg); + test_append_formatted_commands(cfg); + if (throughput) test_throughput(cfg); + + printf("\nTesting against Unix socket connection (%s): ", cfg.unix_sock.path); + if (test_unix_socket) { + printf("\n"); + cfg.type = CONN_UNIX; + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + if (throughput) test_throughput(cfg); + } else { + test_skipped(); + } + +#ifdef HIREDIS_TEST_SSL + if (cfg.ssl.port && cfg.ssl.host) { + + redisInitOpenSSL(); + _ssl_ctx = redisCreateSSLContext(cfg.ssl.ca_cert, NULL, cfg.ssl.cert, cfg.ssl.key, NULL, NULL); + assert(_ssl_ctx != NULL); + + printf("\nTesting against SSL connection (%s:%d):\n", cfg.ssl.host, cfg.ssl.port); + cfg.type = CONN_SSL; + + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + test_invalid_timeout_errors(cfg); + test_append_formatted_commands(cfg); + if (throughput) test_throughput(cfg); + + redisFreeSSLContext(_ssl_ctx); + _ssl_ctx = NULL; + } +#endif + +#ifdef HIREDIS_TEST_ASYNC + printf("\nTesting asynchronous API against TCP connection (%s:%d):\n", cfg.tcp.host, cfg.tcp.port); + cfg.type = CONN_TCP; + + int major; + redisContext *c = do_connect(cfg); + get_redis_version(c, &major, NULL); + disconnect(c, 0); + + test_pubsub_handling(cfg); + test_pubsub_multiple_channels(cfg); + test_monitor(cfg); + if (major >= 6) { + test_pubsub_handling_resp3(cfg); + test_command_timeout_during_pubsub(cfg); + } +#endif /* HIREDIS_TEST_ASYNC */ + + if (test_inherit_fd) { + printf("\nTesting against inherited fd (%s): ", cfg.unix_sock.path); + if (test_unix_socket) { + printf("\n"); + cfg.type 
= CONN_FD; + test_blocking_connection(cfg); + } else { + test_skipped(); + } + } + + if (fails || (skips_as_fails && skips)) { + printf("*** %d TESTS FAILED ***\n", fails); + if (skips) { + printf("*** %d TESTS SKIPPED ***\n", skips); + } + return 1; + } + + printf("ALL TESTS PASSED (%d skipped)\n", skips); + return 0; +} diff --git a/deps/hiredis/test.sh b/deps/hiredis/test.sh new file mode 100755 index 0000000..c72bcb0 --- /dev/null +++ b/deps/hiredis/test.sh @@ -0,0 +1,78 @@ +#!/bin/sh -ue + +REDIS_SERVER=${REDIS_SERVER:-redis-server} +REDIS_PORT=${REDIS_PORT:-56379} +REDIS_SSL_PORT=${REDIS_SSL_PORT:-56443} +TEST_SSL=${TEST_SSL:-0} +SKIPS_AS_FAILS=${SKIPS_AS_FAILS-:0} +SSL_TEST_ARGS= +SKIPS_ARG= + +tmpdir=$(mktemp -d) +PID_FILE=${tmpdir}/hiredis-test-redis.pid +SOCK_FILE=${tmpdir}/hiredis-test-redis.sock + +if [ "$TEST_SSL" = "1" ]; then + SSL_CA_CERT=${tmpdir}/ca.crt + SSL_CA_KEY=${tmpdir}/ca.key + SSL_CERT=${tmpdir}/redis.crt + SSL_KEY=${tmpdir}/redis.key + + openssl genrsa -out ${tmpdir}/ca.key 4096 + openssl req \ + -x509 -new -nodes -sha256 \ + -key ${SSL_CA_KEY} \ + -days 3650 \ + -subj '/CN=Hiredis Test CA' \ + -out ${SSL_CA_CERT} + openssl genrsa -out ${SSL_KEY} 2048 + openssl req \ + -new -sha256 \ + -key ${SSL_KEY} \ + -subj '/CN=Hiredis Test Cert' | \ + openssl x509 \ + -req -sha256 \ + -CA ${SSL_CA_CERT} \ + -CAkey ${SSL_CA_KEY} \ + -CAserial ${tmpdir}/ca.txt \ + -CAcreateserial \ + -days 365 \ + -out ${SSL_CERT} + + SSL_TEST_ARGS="--ssl-host 127.0.0.1 --ssl-port ${REDIS_SSL_PORT} --ssl-ca-cert ${SSL_CA_CERT} --ssl-cert ${SSL_CERT} --ssl-key ${SSL_KEY}" +fi + +cleanup() { + set +e + kill $(cat ${PID_FILE}) + rm -rf ${tmpdir} +} +trap cleanup INT TERM EXIT + +cat > ${tmpdir}/redis.conf <> ${tmpdir}/redis.conf < /* for struct timeval */ + +#ifndef inline +#define inline __inline +#endif + +#ifndef strcasecmp +#define strcasecmp stricmp +#endif + +#ifndef strncasecmp +#define strncasecmp strnicmp +#endif + +#ifndef va_copy +#define va_copy(d,s) ((d) = (s)) +#endif + +#ifndef snprintf +#define snprintf c99_snprintf + +__inline int c99_vsnprintf(char* str, size_t size, const char* format, va_list ap) +{ + int count = -1; + + if (size != 0) + count = _vsnprintf_s(str, size, _TRUNCATE, format, ap); + if (count == -1) + count = _vscprintf(format, ap); + + return count; +} + +__inline int c99_snprintf(char* str, size_t size, const char* format, ...) 
+{ + int count; + va_list ap; + + va_start(ap, format); + count = c99_vsnprintf(str, size, format, ap); + va_end(ap); + + return count; +} +#endif +#endif /* _MSC_VER */ + +#ifdef _WIN32 +#define strerror_r(errno,buf,len) strerror_s(buf,len,errno) +#endif /* _WIN32 */ + +#endif /* _WIN32_HELPER_INCLUDE */ diff --git a/deps/jemalloc/.appveyor.yml b/deps/jemalloc/.appveyor.yml new file mode 100644 index 0000000..90b0368 --- /dev/null +++ b/deps/jemalloc/.appveyor.yml @@ -0,0 +1,42 @@ +version: '{build}' + +environment: + matrix: + - MSYSTEM: MINGW64 + CPU: x86_64 + MSVC: amd64 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW64 + CPU: x86_64 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW32 + CPU: i686 + MSVC: x86 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW32 + CPU: i686 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW64 + CPU: x86_64 + MSVC: amd64 + - MSYSTEM: MINGW64 + CPU: x86_64 + - MSYSTEM: MINGW32 + CPU: i686 + MSVC: x86 + - MSYSTEM: MINGW32 + CPU: i686 + +install: + - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% + - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC% + - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc + - pacman --noconfirm -Suy mingw-w64-%CPU%-make + +build_script: + - bash -c "autoconf" + - bash -c "./configure $CONFIG_FLAGS" + - mingw32-make + - file lib/jemalloc.dll + - mingw32-make tests + - mingw32-make -k check diff --git a/deps/jemalloc/.autom4te.cfg b/deps/jemalloc/.autom4te.cfg new file mode 100644 index 0000000..fe2424d --- /dev/null +++ b/deps/jemalloc/.autom4te.cfg @@ -0,0 +1,3 @@ +begin-language: "Autoconf-without-aclocal-m4" +args: --no-cache +end-language: "Autoconf-without-aclocal-m4" diff --git a/deps/jemalloc/.cirrus.yml b/deps/jemalloc/.cirrus.yml new file mode 100644 index 0000000..019d2c3 --- /dev/null +++ b/deps/jemalloc/.cirrus.yml @@ -0,0 +1,21 @@ +env: + CIRRUS_CLONE_DEPTH: 1 + ARCH: amd64 + +task: + freebsd_instance: + matrix: + image: freebsd-12-0-release-amd64 + image: freebsd-11-2-release-amd64 + install_script: + - sed -i.bak -e 's,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/latest,' /etc/pkg/FreeBSD.conf + - pkg upgrade -y + - pkg install -y autoconf gmake + script: + - autoconf + #- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS + - ./configure + - export JFLAG=`sysctl -n kern.smp.cpus` + - gmake -j${JFLAG} + - gmake -j${JFLAG} tests + - gmake check diff --git a/deps/jemalloc/.gitattributes b/deps/jemalloc/.gitattributes new file mode 100644 index 0000000..6313b56 --- /dev/null +++ b/deps/jemalloc/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/deps/jemalloc/.gitignore b/deps/jemalloc/.gitignore new file mode 100644 index 0000000..5ca0ad1 --- /dev/null +++ b/deps/jemalloc/.gitignore @@ -0,0 +1,92 @@ +/bin/jemalloc-config +/bin/jemalloc.sh +/bin/jeprof + +/config.stamp +/config.log +/config.status +/configure + +/doc/html.xsl +/doc/manpages.xsl +/doc/jemalloc.xml +/doc/jemalloc.html +/doc/jemalloc.3 + +/jemalloc.pc + +/lib/ + +/Makefile + +/include/jemalloc/internal/jemalloc_preamble.h +/include/jemalloc/internal/jemalloc_internal_defs.h +/include/jemalloc/internal/private_namespace.gen.h +/include/jemalloc/internal/private_namespace.h +/include/jemalloc/internal/private_namespace_jet.gen.h +/include/jemalloc/internal/private_namespace_jet.h +/include/jemalloc/internal/private_symbols.awk +/include/jemalloc/internal/private_symbols_jet.awk 
+/include/jemalloc/internal/public_namespace.h +/include/jemalloc/internal/public_symbols.txt +/include/jemalloc/internal/public_unnamespace.h +/include/jemalloc/jemalloc.h +/include/jemalloc/jemalloc_defs.h +/include/jemalloc/jemalloc_macros.h +/include/jemalloc/jemalloc_mangle.h +/include/jemalloc/jemalloc_mangle_jet.h +/include/jemalloc/jemalloc_protos.h +/include/jemalloc/jemalloc_protos_jet.h +/include/jemalloc/jemalloc_rename.h +/include/jemalloc/jemalloc_typedefs.h + +/src/*.[od] +/src/*.sym + +/run_tests.out/ + +/test/test.sh +test/include/test/jemalloc_test.h +test/include/test/jemalloc_test_defs.h + +/test/integration/[A-Za-z]* +!/test/integration/[A-Za-z]*.* +/test/integration/*.[od] +/test/integration/*.out + +/test/integration/cpp/[A-Za-z]* +!/test/integration/cpp/[A-Za-z]*.* +/test/integration/cpp/*.[od] +/test/integration/cpp/*.out + +/test/src/*.[od] + +/test/stress/[A-Za-z]* +!/test/stress/[A-Za-z]*.* +/test/stress/*.[od] +/test/stress/*.out + +/test/unit/[A-Za-z]* +!/test/unit/[A-Za-z]*.* +/test/unit/*.[od] +/test/unit/*.out + +/VERSION + +*.pdb +*.sdf +*.opendb +*.VC.db +*.opensdf +*.cachefile +*.suo +*.user +*.sln.docstates +*.tmp +.vs/ +/msvc/Win32/ +/msvc/x64/ +/msvc/projects/*/*/Debug*/ +/msvc/projects/*/*/Release*/ +/msvc/projects/*/*/Win32/ +/msvc/projects/*/*/x64/ diff --git a/deps/jemalloc/.travis.yml b/deps/jemalloc/.travis.yml new file mode 100644 index 0000000..2da5da8 --- /dev/null +++ b/deps/jemalloc/.travis.yml @@ -0,0 +1,195 @@ +language: generic +dist: precise + +matrix: + include: + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: &gcc_multilib + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - 
os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" 
EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: *gcc_multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-libdl" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --enable-opt-safety-checks" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ 
COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-libdl --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-opt-safety-checks --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + # Development build + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + # --enable-expermental-smallocx: + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + + # Valgrind + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind" + addons: + apt: + packages: + - valgrind + + +before_script: + - autoconf + - scripts/gen_travis.py > travis_script && diff .travis.yml travis_script + - ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS + - make -j3 + - make -j3 tests + +script: + - make check + diff --git a/deps/jemalloc/COPYING b/deps/jemalloc/COPYING new file mode 100644 index 0000000..3b7fd35 --- /dev/null +++ b/deps/jemalloc/COPYING @@ -0,0 +1,27 @@ +Unless otherwise specified, files in the jemalloc source distribution are +subject to the following license: 
+-------------------------------------------------------------------------------- +Copyright (C) 2002-present Jason Evans . +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-present Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- diff --git a/deps/jemalloc/ChangeLog b/deps/jemalloc/ChangeLog new file mode 100644 index 0000000..e55813b --- /dev/null +++ b/deps/jemalloc/ChangeLog @@ -0,0 +1,1518 @@ +Following are change highlights associated with official releases. Important +bug fixes are all mentioned, but some internal enhancements are omitted here for +brevity. Much more detail can be found in the git revision history: + + https://github.com/jemalloc/jemalloc + +* 5.2.1 (August 5, 2019) + + This release is primarily about Windows. A critical virtual memory leak is + resolved on all Windows platforms. The regression was present in all releases + since 5.0.0. + + Bug fixes: + - Fix a severe virtual memory leak on Windows. This regression was first + released in 5.0.0. (@Ignition, @j0t, @frederik-h, @davidtgoldblatt, + @interwq) + - Fix size 0 handling in posix_memalign(). This regression was first released + in 5.2.0. (@interwq) + - Fix the prof_log unit test which may observe unexpected backtraces from + compiler optimizations. The test was first added in 5.2.0. (@marxin, + @gnzlbg, @interwq) + - Fix the declaration of the extent_avail tree. This regression was first + released in 5.1.0. (@zoulasc) + - Fix an incorrect reference in jeprof. This functionality was first released + in 3.0.0. (@prehistoric-penguin) + - Fix an assertion on the deallocation fast-path. This regression was first + released in 5.2.0. (@yinan1048576) + - Fix the TLS_MODEL attribute in headers. This regression was first released + in 5.0.0. (@zoulasc, @interwq) + + Optimizations and refactors: + - Implement opt.retain on Windows and enable by default on 64-bit. (@interwq, + @davidtgoldblatt) + - Optimize away a branch on the operator delete[] path. (@mgrice) + - Add format annotation to the format generator function. (@zoulasc) + - Refactor and improve the size class header generation. (@yinan1048576) + - Remove best fit. 
(@djwatson) + - Avoid blocking on background thread locks for stats. (@oranagra, @interwq) + +* 5.2.0 (April 2, 2019) + + This release includes a few notable improvements, which are summarized below: + 1) improved fast-path performance from the optimizations by @djwatson; 2) + reduced virtual memory fragmentation and metadata usage; and 3) bug fixes on + setting the number of background threads. In addition, peak / spike memory + usage is improved with certain allocation patterns. As usual, the release and + prior dev versions have gone through large-scale production testing. + + New features: + - Implement oversize_threshold, which uses a dedicated arena for allocations + crossing the specified threshold to reduce fragmentation. (@interwq) + - Add extents usage information to stats. (@tyleretzel) + - Log time information for sampled allocations. (@tyleretzel) + - Support 0 size in sdallocx. (@djwatson) + - Output rate for certain counters in malloc_stats. (@zinoale) + - Add configure option --enable-readlinkat, which allows the use of readlinkat + over readlink. (@davidtgoldblatt) + - Add configure options --{enable,disable}-{static,shared} to allow not + building unwanted libraries. (@Ericson2314) + - Add configure option --disable-libdl to enable fully static builds. + (@interwq) + - Add mallctl interfaces: + + opt.oversize_threshold (@interwq) + + stats.arenas..extent_avail (@tyleretzel) + + stats.arenas..extents..n{dirty,muzzy,retained} (@tyleretzel) + + stats.arenas..extents..{dirty,muzzy,retained}_bytes + (@tyleretzel) + + Portability improvements: + - Update MSVC builds. (@maksqwe, @rustyx) + - Workaround a compiler optimizer bug on s390x. (@rkmisra) + - Make use of pthread_set_name_np(3) on FreeBSD. (@trasz) + - Implement malloc_getcpu() to enable percpu_arena for windows. (@santagada) + - Link against -pthread instead of -lpthread. (@paravoid) + - Make background_thread not dependent on libdl. (@interwq) + - Add stringify to fix a linker directive issue on MSVC. (@daverigby) + - Detect and fall back when 8-bit atomics are unavailable. (@interwq) + - Fall back to the default pthread_create if dlsym(3) fails. (@interwq) + + Optimizations and refactors: + - Refactor the TSD module. (@davidtgoldblatt) + - Avoid taking extents_muzzy mutex when muzzy is disabled. (@interwq) + - Avoid taking large_mtx for auto arenas on the tcache flush path. (@interwq) + - Optimize ixalloc by avoiding a size lookup. (@interwq) + - Implement opt.oversize_threshold which uses a dedicated arena for requests + crossing the threshold, also eagerly purges the oversize extents. Default + the threshold to 8 MiB. (@interwq) + - Clean compilation with -Wextra. (@gnzlbg, @jasone) + - Refactor the size class module. (@davidtgoldblatt) + - Refactor the stats emitter. (@tyleretzel) + - Optimize pow2_ceil. (@rkmisra) + - Avoid runtime detection of lazy purging on FreeBSD. (@trasz) + - Optimize mmap(2) alignment handling on FreeBSD. (@trasz) + - Improve error handling for THP state initialization. (@jsteemann) + - Rework the malloc() fast path. (@djwatson) + - Rework the free() fast path. (@djwatson) + - Refactor and optimize the tcache fill / flush paths. (@djwatson) + - Optimize sync / lwsync on PowerPC. (@chmeeedalf) + - Bypass extent_dalloc() when retain is enabled. (@interwq) + - Optimize the locking on large deallocation. (@interwq) + - Reduce the number of pages committed from sanity checking in debug build. + (@trasz, @interwq) + - Deprecate OSSpinLock. 
(@interwq) + - Lower the default number of background threads to 4 (when the feature + is enabled). (@interwq) + - Optimize the trylock spin wait. (@djwatson) + - Use arena index for arena-matching checks. (@interwq) + - Avoid forced decay on thread termination when using background threads. + (@interwq) + - Disable muzzy decay by default. (@djwatson, @interwq) + - Only initialize libgcc unwinder when profiling is enabled. (@paravoid, + @interwq) + + Bug fixes (all only relevant to jemalloc 5.x): + - Fix background thread index issues with max_background_threads. (@djwatson, + @interwq) + - Fix stats output for opt.lg_extent_max_active_fit. (@interwq) + - Fix opt.prof_prefix initialization. (@davidtgoldblatt) + - Properly trigger decay on tcache destroy. (@interwq, @amosbird) + - Fix tcache.flush. (@interwq) + - Detect whether explicit extent zero out is necessary with huge pages or + custom extent hooks, which may change the purge semantics. (@interwq) + - Fix a side effect caused by extent_max_active_fit combined with decay-based + purging, where freed extents can accumulate and not be reused for an + extended period of time. (@interwq, @mpghf) + - Fix a missing unlock on extent register error handling. (@zoulasc) + + Testing: + - Simplify the Travis script output. (@gnzlbg) + - Update the test scripts for FreeBSD. (@devnexen) + - Add unit tests for the producer-consumer pattern. (@interwq) + - Add Cirrus-CI config for FreeBSD builds. (@jasone) + - Add size-matching sanity checks on tcache flush. (@davidtgoldblatt, + @interwq) + + Incompatible changes: + - Remove --with-lg-page-sizes. (@davidtgoldblatt) + + Documentation: + - Attempt to build docs by default, however skip doc building when xsltproc + is missing. (@interwq, @cmuellner) + +* 5.1.0 (May 4, 2018) + + This release is primarily about fine-tuning, ranging from several new features + to numerous notable performance and portability enhancements. The release and + prior dev versions have been running in multiple large scale applications for + months, and the cumulative improvements are substantial in many cases. + + Given the long and successful production runs, this release is likely a good + candidate for applications to upgrade, from both jemalloc 5.0 and before. For + performance-critical applications, the newly added TUNING.md provides + guidelines on jemalloc tuning. + + New features: + - Implement transparent huge page support for internal metadata. (@interwq) + - Add opt.thp to allow enabling / disabling transparent huge pages for all + mappings. (@interwq) + - Add maximum background thread count option. (@djwatson) + - Allow prof_active to control opt.lg_prof_interval and prof.gdump. + (@interwq) + - Allow arena index lookup based on allocation addresses via mallctl. + (@lionkov) + - Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD) + - Add opt.lg_extent_max_active_fit to set the max ratio between the size of + the active extent selected (to split off from) and the size of the requested + allocation. (@interwq, @davidtgoldblatt) + - Add retain_grow_limit to set the max size when growing virtual address + space. (@interwq) + - Add mallctl interfaces: + + arena..retain_grow_limit (@interwq) + + arenas.lookup (@lionkov) + + max_background_threads (@djwatson) + + opt.lg_extent_max_active_fit (@interwq) + + opt.max_background_threads (@djwatson) + + opt.metadata_thp (@interwq) + + opt.thp (@interwq) + + stats.metadata_thp (@interwq) + + Portability improvements: + - Support GNU/kFreeBSD configuration. 
(@paravoid) + - Support m68k, nios2 and SH3 architectures. (@paravoid) + - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo) + - Fix symbol listing for cross-compiling. (@tamird) + - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid) + - Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin) + - Fix MSVC 2015 & 2017 builds. (@rustyx) + - Improve RISC-V support. (@EdSchouten) + - Set name mangling script in strict mode. (@nicolov) + - Avoid MADV_HUGEPAGE on ARM. (@marxin) + - Modify configure to determine return value of strerror_r. + (@davidtgoldblatt, @cferris1000) + - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani) + - Fix 32-bit build on MSVC. (@rustyx) + - Fix external symbol on MSVC. (@maksqwe) + - Avoid a printf format specifier warning. (@jasone) + - Add configure option --disable-initial-exec-tls which can allow jemalloc to + be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD) + - AArch64: Add ILP32 support. (@cmuellner) + - Add --with-lg-vaddr configure option to support cross compiling. + (@cmuellner, @davidtgoldblatt) + + Optimizations and refactors: + - Improve active extent fit with extent_max_active_fit. This considerably + reduces fragmentation over time and improves virtual memory and metadata + usage. (@davidtgoldblatt, @interwq) + - Eagerly coalesce large extents to reduce fragmentation. (@interwq) + - sdallocx: only read size info when page aligned (i.e. possibly sampled), + which speeds up the sized deallocation path significantly. (@interwq) + - Avoid attempting new mappings for in place expansion with retain, since + it rarely succeeds in practice and causes high overhead. (@interwq) + - Refactor OOM handling in newImpl. (@wqfish) + - Add internal fine-grained logging functionality for debugging use. + (@davidtgoldblatt) + - Refactor arena / tcache interactions. (@davidtgoldblatt) + - Refactor extent management with dumpable flag. (@davidtgoldblatt) + - Add runtime detection of lazy purging. (@interwq) + - Use pairing heap instead of red-black tree for extents_avail. (@djwatson) + - Use sysctl on startup in FreeBSD. (@trasz) + - Use thread local prng state instead of atomic. (@djwatson) + - Make decay to always purge one more extent than before, because in + practice large extents are usually the ones that cross the decay threshold. + Purging the additional extent helps save memory as well as reduce VM + fragmentation. (@interwq) + - Fast division by dynamic values. (@davidtgoldblatt) + - Improve the fit for aligned allocation. (@interwq, @edwinsmith) + - Refactor extent_t bitpacking. (@rkmisra) + - Optimize the generated assembly for ticker operations. (@davidtgoldblatt) + - Convert stats printing to use a structured text emitter. (@davidtgoldblatt) + - Remove preserve_lru feature for extents management. (@djwatson) + - Consolidate two memory loads into one on the fast deallocation path. + (@davidtgoldblatt, @interwq) + + Bug fixes (most of the issues are only relevant to jemalloc 5.0): + - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt) + - Validate returned file descriptor before use. (@zonyitoo) + - Fix a few background thread initialization and shutdown issues. (@interwq) + - Fix an extent coalesce + decay race by taking both coalescing extents off + the LRU list. (@interwq) + - Fix potentially unbound increase during decay, caused by one thread keep + stashing memory to purge while other threads generating new pages. 
The + number of pages to purge is checked to prevent this. (@interwq) + - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq) + - Handle 32 bit mutex counters. (@rkmisra) + - Fix an indexing bug when creating background threads. (@davidtgoldblatt, + @binliu19) + - Fix arguments passed to extent_init. (@yuleniwo, @interwq) + - Fix addresses used for ordering mutexes. (@rkmisra) + - Fix abort_conf processing during bootstrap. (@interwq) + - Fix include path order for out-of-tree builds. (@cmuellner) + + Incompatible changes: + - Remove --disable-thp. (@interwq) + - Remove mallctl interfaces: + + config.thp (@interwq) + + Documentation: + - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson) + +* 5.0.1 (July 1, 2017) + + This bugfix release fixes several issues, most of which are obscure enough + that typical applications are not impacted. + + Bug fixes: + - Update decay->nunpurged before purging, in order to avoid potential update + races and subsequent incorrect purging volume. (@interwq) + - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy + locking and/or background threads). This mitigates an initialization + failure bug for which we still do not have a clear reproduction test case. + (@interwq) + - Modify tsd management so that it neither crashes nor leaks if a thread's + only allocation activity is to call free() after TLS destructors have been + executed. This behavior was observed when operating with GNU libc, and is + unlikely to be an issue with other libc implementations. (@interwq) + - Mask signals during background thread creation. This prevents signals from + being inadvertently delivered to background threads. (@jasone, + @davidtgoldblatt, @interwq) + - Avoid inactivity checks within background threads, in order to prevent + recursive mutex acquisition. (@interwq) + - Fix extent_grow_retained() to use the specified hooks when the + arena.<i>.extent_hooks mallctl is used to override the default hooks. + (@interwq) + - Add missing reentrancy support for custom extent hooks which allocate. + (@interwq) + - Post-fork(2), re-initialize the list of tcaches associated with each arena + to contain no tcaches except the forking thread's. (@interwq) + - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This + fixes potential deadlocks after fork(2). (@interwq) + - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to + generate corrupt configure scripts. (@jasone) + - Ensure that the configured page size (--with-lg-page) is no larger than the + configured huge page size (--with-lg-hugepage). (@jasone) + +* 5.0.0 (June 13, 2017) + + Unlike all previous jemalloc releases, this release does not use naturally + aligned "chunks" for virtual memory management, and instead uses page-aligned + "extents". This change has few externally visible effects, but the internal + impacts are... extensive. Many other internal changes combine to make this + the most cohesively designed version of jemalloc so far, with ample + opportunity for further enhancements. + + Continuous integration is now an integral aspect of development thanks to the + efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably + stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a + side effect the official release frequency may decrease over time. + + New features: + - Implement optional per-CPU arena support; threads choose which arena to use + based on current CPU rather than on fixed thread-->arena associations.
+ (@interwq) + - Implement two-phase decay of unused dirty pages. Pages transition from + dirty-->muzzy-->clean, where the first phase transition relies on + madvise(... MADV_FREE) semantics, and the second phase transition discards + pages such that they are replaced with demand-zeroed pages on next access. + (@jasone) + - Increase decay time resolution from seconds to milliseconds. (@jasone) + - Implement opt-in per CPU background threads, and use them for asynchronous + decay-driven unused dirty page purging. (@interwq) + - Add mutex profiling, which collects a variety of statistics useful for + diagnosing overhead/contention issues. (@interwq) + - Add C++ new/delete operator bindings. (@djwatson) + - Support manually created arena destruction, such that all data and metadata + are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats + associated with destroyed arenas. (@jasone) + - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing + merged/destroyed arena statistics via mallctl. (@jasone) + - Add opt.abort_conf to optionally abort if invalid configuration options are + detected during initialization. (@interwq) + - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the + stats dumped during exit if opt.stats_print is true. (@jasone) + - Add --with-version=VERSION for use when embedding jemalloc into another + project's git repository. (@jasone) + - Add --disable-thp to support cross compiling. (@jasone) + - Add --with-lg-hugepage to support cross compiling. (@jasone) + - Add mallctl interfaces (various authors): + + background_thread + + opt.abort_conf + + opt.retain + + opt.percpu_arena + + opt.background_thread + + opt.{dirty,muzzy}_decay_ms + + opt.stats_print_opts + + arena.<i>.initialized + + arena.<i>.destroy + + arena.<i>.{dirty,muzzy}_decay_ms + + arena.<i>.extent_hooks + + arenas.{dirty,muzzy}_decay_ms + + arenas.bin.<i>.slab_size + + arenas.nlextents + + arenas.lextent.<i>.size + + arenas.create + + stats.background_thread.{num_threads,num_runs,run_interval} + + stats.mutexes.{ctl,background_thread,prof,reset}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.{dirty,muzzy}_decay_ms + + stats.arenas.<i>.uptime + + stats.arenas.<i>.{pmuzzy,base,internal,resident} + + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged} + + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs} + + stats.arenas.<i>.bins.<j>.mutex. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents} + + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, + extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + Portability improvements: + - Improve reentrant allocation support, such that deadlock is less likely if + e.g. a system library call in turn allocates memory. (@davidtgoldblatt, + @interwq) + - Support static linking of jemalloc with glibc. (@djwatson) + + Optimizations and refactors: + - Organize virtual memory as "extents" of virtual memory pages, rather than as + naturally aligned "chunks", and store all metadata in arbitrarily distant + locations. This reduces virtual memory external fragmentation, and will + interact better with huge pages (not yet explicitly supported). (@jasone) + - Fold large and huge size classes together; only small and large size classes + remain.
(@jasone) + - Unify the allocation paths, and merge most fast-path branching decisions. + (@davidtgoldblatt, @interwq) + - Embed per thread automatic tcache into thread-specific data, which reduces + conditional branches and dereferences. Also reorganize tcache to increase + fast-path data locality. (@interwq) + - Rewrite atomics to closely model the C11 API, convert various + synchronization from mutex-based to atomic, and use the explicit memory + ordering control to resolve various hypothetical races without increasing + synchronization overhead. (@davidtgoldblatt) + - Extensively optimize rtree via various methods: + + Add multiple layers of rtree lookup caching, since rtree lookups are now + part of fast-path deallocation. (@interwq) + + Determine rtree layout at compile time. (@jasone) + + Make the tree shallower for common configurations. (@jasone) + + Embed the root node in the top-level rtree data structure, thus avoiding + one level of indirection. (@jasone) + + Further specialize leaf elements as compared to internal node elements, + and directly embed extent metadata needed for fast-path deallocation. + (@jasone) + + Ignore leading always-zero address bits (architecture-specific). + (@jasone) + - Reorganize headers (ongoing work) to make them hermetic, and disentangle + various module dependencies. (@davidtgoldblatt) + - Convert various internal data structures such as size class metadata from + boot-time-initialized to compile-time-initialized. Propagate resulting data + structure simplifications, such as making arena metadata fixed-size. + (@jasone) + - Simplify size class lookups when constrained to size classes that are + multiples of the page size. This speeds lookups, but the primary benefit is + complexity reduction in code that was the source of numerous regressions. + (@jasone) + - Lock individual extents when possible for localized extent operations, + rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) + - Use first fit layout policy instead of best fit, in order to improve + packing. (@jasone) + - If munmap(2) is not in use, use an exponential series to grow each arena's + virtual memory, so that the number of disjoint virtual memory mappings + remains low. (@jasone) + - Implement per arena base allocators, so that arenas never share any virtual + memory pages. (@jasone) + - Automatically generate private symbol name mangling macros. (@jasone) + + Incompatible changes: + - Replace chunk hooks with an expanded/normalized set of extent hooks. + (@jasone) + - Remove ratio-based purging. (@jasone) + - Remove --disable-tcache. (@jasone) + - Remove --disable-tls. (@jasone) + - Remove --enable-ivsalloc. (@jasone) + - Remove --with-lg-size-class-group. (@jasone) + - Remove --with-lg-tiny-min. (@jasone) + - Remove --disable-cc-silence. (@jasone) + - Remove --enable-code-coverage. (@jasone) + - Remove --disable-munmap (replaced by opt.retain). (@jasone) + - Remove Valgrind support. (@jasone) + - Remove quarantine support. (@jasone) + - Remove redzone support. 
(@jasone) + - Remove mallctl interfaces (various authors): + + config.munmap + + config.tcache + + config.tls + + config.valgrind + + opt.lg_chunk + + opt.purge + + opt.lg_dirty_mult + + opt.decay_time + + opt.quarantine + + opt.redzone + + opt.thp + + arena..lg_dirty_mult + + arena..decay_time + + arena..chunk_hooks + + arenas.initialized + + arenas.lg_dirty_mult + + arenas.decay_time + + arenas.bin..run_size + + arenas.nlruns + + arenas.lrun..size + + arenas.nhchunks + + arenas.hchunk..size + + arenas.extend + + stats.cactive + + stats.arenas..lg_dirty_mult + + stats.arenas..decay_time + + stats.arenas..metadata.{mapped,allocated} + + stats.arenas..{npurge,nmadvise,purged} + + stats.arenas..huge.{allocated,nmalloc,ndalloc,nrequests} + + stats.arenas..bins..{nruns,reruns,curruns} + + stats.arenas..lruns..{nmalloc,ndalloc,nrequests,curruns} + + stats.arenas..hchunks..{nmalloc,ndalloc,nrequests,curhchunks} + + Bug fixes: + - Improve interval-based profile dump triggering to dump only one profile when + a single allocation's size exceeds the interval. (@jasone) + - Use prefixed function names (as controlled by --with-jemalloc-prefix) when + pruning backtrace frames in jeprof. (@jasone) + +* 4.5.0 (February 28, 2017) + + This is the first release to benefit from much broader continuous integration + testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure + in place for prior releases, it would have caught all of the most serious + regressions fixed by this release. + + New features: + - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for + transparent huge page integration. (@jasone) + - Update zone allocator integration to work with macOS 10.12. (@glandium) + - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and + EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not + during configuration. (@jasone, @ronawho) + + Bug fixes: + - Fix DSS (sbrk(2)-based) allocation. This regression was first released in + 4.3.0. (@jasone) + - Handle race in per size class utilization computation. This functionality + was first released in 4.0.0. (@interwq) + - Fix lock order reversal during gdump. (@jasone) + - Fix/refactor tcache synchronization. This regression was first released in + 4.0.0. (@jasone) + - Fix various JSON-formatted malloc_stats_print() bugs. This functionality + was first released in 4.3.0. (@jasone) + - Fix huge-aligned allocation. This regression was first released in 4.4.0. + (@jasone) + - When transparent huge page integration is enabled, detect what state pages + start in according to the kernel's current operating mode, and only convert + arena chunks to non-huge during purging if that is not their initial state. + This functionality was first released in 4.4.0. (@jasone) + - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. + This regression was first released in 4.0.0. (@jasone, @428desmo) + - Properly detect sparc64 when building for Linux. (@glaubitz) + +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. (@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. 
This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas..nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. (@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). 
(@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena..reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas..retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. (@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. (@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + +* 4.1.1 (May 3, 2016) + + This bugfix release resolves a variety of mostly minor issues, though the + bitmap fix is critical for 64-bit Windows. + + Bug fixes: + - Fix the linear scan version of bitmap_sfu() to shift by the proper amount + even when sizeof(long) is not the same as sizeof(void *), as on 64-bit + Windows. (@jasone) + - Fix hashing functions to avoid unaligned memory accesses (and resulting + crashes). This is relevant at least to some ARM-based platforms. + (@rkmisra) + - Fix fork()-related lock rank ordering reversals. These reversals were + unlikely to cause deadlocks in practice except when heap profiling was + enabled and active. (@jasone) + - Fix various chunk leaks in OOM code paths. (@jasone) + - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) + - Fix a variety of test failures that were due to test fragility rather than + core bugs. (@jasone) + +* 4.1.0 (February 28, 2016) + + This release is primarily about optimizations, but it also incorporates a lot + of portability-motivated refactoring and enhancements. Many people worked on + this release, to an extent that even with the omission here of minor changes + (see git revision history), and of the people who reported and diagnosed + issues, so much of the work was contributed that starting with this release, + changes are annotated with author credits to help reflect the collaborative + effort involved. + + New features: + - Implement decay-based unused dirty page purging, a major optimization with + mallctl API impact. This is an alternative to the existing ratio-based + unused dirty page purging, and is intended to eventually become the sole + purging mechanism. New mallctls: + + opt.purge + + opt.decay_time + + arena..decay + + arena..decay_time + + arenas.decay_time + + stats.arenas..decay_time + (@jasone, @cevans87) + - Add --with-malloc-conf, which makes it possible to embed a default + options string during configuration. 
This was motivated by the desire to + specify --with-malloc-conf=purge:decay , since the default must remain + purge:ratio until the 5.0.0 release. (@jasone) + - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) + - Make *allocx() size class overflow behavior defined. The maximum + size class is now less than PTRDIFF_MAX to protect applications against + numerical overflow, and all allocation functions are guaranteed to indicate + errors rather than potentially crashing if the request size exceeds the + maximum size class. (@jasone) + - jeprof: + + Add raw heap profile support. (@jasone) + + Add --retain and --exclude for backtrace symbol filtering. (@jasone) + + Optimizations: + - Optimize the fast path to combine various bootstrapping and configuration + checks and execute more streamlined code in the common case. (@interwq) + - Use linear scan for small bitmaps (used for small object tracking). In + addition to speeding up bitmap operations on 64-bit systems, this reduces + allocator metadata overhead by approximately 0.2%. (@djwatson) + - Separate arena_avail trees, which substantially speeds up run tree + operations. (@djwatson) + - Use memoization (boot-time-computed table) for run quantization. Separate + arena_avail trees reduced the importance of this optimization. (@jasone) + - Attempt mmap-based in-place huge reallocation. This can dramatically speed + up incremental huge reallocation. (@jasone) + + Incompatible changes: + - Make opt.narenas unsigned rather than size_t. (@jasone) + + Bug fixes: + - Fix stats.cactive accounting regression. (@rustyx, @jasone) + - Handle unaligned keys in hash(). This caused problems for some ARM systems. + (@jasone, @cferris1000) + - Refactor arenas array. In addition to fixing a fork-related deadlock, this + makes arena lookups faster and simpler. (@jasone) + - Move retained memory allocation out of the default chunk allocation + function, to a location that gets executed even if the application installs + a custom chunk allocation function. This resolves a virtual memory leak. + (@buchgr) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) + - Fix run quantization. In practice this bug had no impact unless + applications requested memory with alignment exceeding one page. + (@jasone, @djwatson) + - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) + - jeprof: + + Don't discard curl options if timeout is not defined. (@djwatson) + + Detect failed profile fetches. (@djwatson) + - Fix stats.arenas..{dss,lg_dirty_mult,decay_time,pactive,pdirty} for + --disable-stats case. (@jasone) + +* 4.0.4 (October 24, 2015) + + This bugfix release fixes another xallocx() regression. No other regressions + have come to light in over a month, so this is likely a good starting point + for people who prefer to wait for "dot one" releases with all the major issues + shaken out. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large + allocations that have been randomly assigned an offset of 0 when + --enable-cache-oblivious configure option is enabled. + +* 4.0.3 (September 24, 2015) + + This bugfix release continues the trend of xallocx() and heap profiling fixes. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large + allocations when --enable-cache-oblivious configure option is enabled. + - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations + when resizing from/to a size class that is not a multiple of the chunk size. 
+ - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap + profile dumping started. + - Work around a potentially bad thread-specific data initialization + interaction with NPTL (glibc's pthreads implementation). + +* 4.0.2 (September 21, 2015) + + This bugfix release addresses a few bugs specific to heap profiling. + + Bug fixes: + - Fix ixallocx_prof_sample() to never modify nor create sampled small + allocations. xallocx() is in general incapable of moving small allocations, + so this fix removes buggy code without loss of generality. + - Fix irallocx_prof_sample() to always allocate large regions, even when + alignment is non-zero. + - Fix prof_alloc_rollback() to read tdata from thread-specific data rather + than dereferencing a potentially invalid tctx. + +* 4.0.1 (September 15, 2015) + + This is a bugfix release that is somewhat high risk due to the amount of + refactoring required to address deep xallocx() problems. As a side effect of + these fixes, xallocx() now tries harder to partially fulfill requests for + optional extra space. Note that a couple of minor heap profiling + optimizations are included, but these are better thought of as performance + fixes that were integral to discovering most of the other bugs. + + Optimizations: + - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the + fast path when heap profiling is enabled. Additionally, split a special + case out into arena_prof_tctx_reset(), which also avoids chunk metadata + reads. + - Optimize irallocx_prof() to optimistically update the sampler state. The + prior implementation appears to have been a holdover from when + rallocx()/xallocx() functionality was combined as rallocm(). + + Bug fixes: + - Fix TLS configuration such that it is enabled by default for platforms on + which it works correctly. + - Fix arenas_cache_cleanup() and arena_get_hard() to handle + allocation/deallocation within the application's thread-specific data + cleanup functions even after arenas_cache is torn down. + - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. + - Fix chunk purge hook calls for in-place huge shrinking reallocation to + specify the old chunk size rather than the new chunk size. This bug caused + no correctness issues for the default chunk purge function, but was + visible to custom functions set via the "arena..chunk_hooks" mallctl. + - Fix heap profiling bugs: + + Fix heap profiling to distinguish among otherwise identical sample sites + with interposed resets (triggered via the "prof.reset" mallctl). This bug + could cause data structure corruption that would most likely result in a + segfault. + + Fix irealloc_prof() to prof_alloc_rollback() on OOM. + + Make one call to prof_active_get_unlocked() per allocation event, and use + the result throughout the relevant functions that handle an allocation + event. Also add a missing check in prof_realloc(). These fixes protect + allocation events against concurrent prof_active changes. + + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() + in the correct order. + + Fix prof_realloc() to call prof_free_sampled_object() after calling + prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were + the same, the tctx could have been prematurely destroyed. + - Fix portability bugs: + + Don't bitshift by negative amounts when encoding/decoding run sizes in + chunk header maps. This affected systems with page sizes greater than 8 + KiB. 
+ + Rename index_t to szind_t to avoid an existing type on Solaris. + + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to + match glibc and avoid compilation errors when including both + jemalloc/jemalloc.h and malloc.h in C++ code. + + Don't assume that /bin/sh is appropriate when running size_classes.sh + during configuration. + + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. + + Link tests to librt if it contains clock_gettime(2). + +* 4.0.0 (August 17, 2015) + + This version contains many speed and space optimizations, both minor and + major. The major themes are generalization, unification, and simplification. + Although many of these optimizations cause no visible behavior change, their + cumulative effect is substantial. + + New features: + - Normalize size class spacing to be consistent across the complete size + range. By default there are four size classes per size doubling, but this + is now configurable via the --with-lg-size-class-group option. Also add the + --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and + --with-lg-tiny-min options, which can be used to tweak page and size class + settings. Impacts: + + Worst case performance for incrementally growing/shrinking reallocation + is improved because there are far fewer size classes, and therefore + copying happens less often. + + Internal fragmentation is limited to 20% for all but the smallest size + classes (those less than four times the quantum). (1B + 4 KiB) + and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. + + Chunk fragmentation tends to be lower because there are fewer distinct run + sizes to pack. + - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and + "tcache.destroy" mallctls control tcache lifetime and flushing, and the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API + control which tcache is used for each operation. + - Implement per thread heap profiling, as well as the ability to + enable/disable heap profiling on a per thread basis. Add the "prof.reset", + "prof.lg_sample", "thread.prof.name", "thread.prof.active", + "opt.prof_thread_active_init", "prof.thread_active_init", and + "thread.prof.active" mallctls. + - Add support for per arena application-specified chunk allocators, configured + via the "arena.<i>.chunk_hooks" mallctl. + - Refactor huge allocation to be managed by arenas, so that arenas now + function as general purpose independent allocators. This is important in + the context of user-specified chunk allocators, aside from the scalability + benefits. Related new statistics: + + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc", + "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests" + mallctls provide high level per arena huge allocation statistics. + + The "arenas.nhchunks", "arenas.hchunk.<i>.size", + "stats.arenas.<i>.hchunks.<j>.nmalloc", + "stats.arenas.<i>.hchunks.<j>.ndalloc", + "stats.arenas.<i>.hchunks.<j>.nrequests", and + "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class + statistics. + - Add the 'util' column to malloc_stats_print() output, which reports the + proportion of available regions that are currently in use for each small + size class. + - Add "alloc" and "free" modes for junk filling (see the "opt.junk" + mallctl), so that it is possible to separately enable junk filling for + allocation versus deallocation.
+ - Add the jemalloc-config script, which provides information about how + jemalloc was configured, and how to integrate it into application builds. + - Add metadata statistics, which are accessible via the "stats.metadata", + "stats.arenas.<i>.metadata.mapped", and + "stats.arenas.<i>.metadata.allocated" mallctls. + - Add the "stats.resident" mallctl, which reports the upper limit of + physically resident memory mapped by the allocator. + - Add per arena control over unused dirty page purging, via the + "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and + "stats.arenas.<i>.lg_dirty_mult" mallctls. + - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump + feature on/off during program execution. + - Add sdallocx(), which implements sized deallocation. The primary + optimization over dallocx() is the removal of a metadata read, which often + suffers an L1 cache miss. + - Add missing header includes in jemalloc/jemalloc.h, so that applications + only have to #include <jemalloc/jemalloc.h>. + - Add support for additional platforms: + + Bitrig + + Cygwin + + DragonFlyBSD + + iOS + + OpenBSD + + OpenRISC/or1k + + Optimizations: + - Maintain dirty runs in per arena LRUs rather than in per arena trees of + dirty-run-containing chunks. In practice this change significantly reduces + dirty page purging volume. + - Integrate whole chunks into the unused dirty page purging machinery. This + reduces the cost of repeated huge allocation/deallocation, because it + effectively introduces a cache of chunks. + - Split the arena chunk map into two separate arrays, in order to increase + cache locality for the frequently accessed bits. + - Move small run metadata out of runs, into arena chunk headers. This reduces + run fragmentation, smaller runs reduce external fragmentation for small size + classes, and packed (less uniformly aligned) metadata layout improves CPU + cache set distribution. + - Randomly distribute large allocation base pointer alignment relative to page + boundaries in order to more uniformly utilize CPU cache sets. This can be + disabled via the --disable-cache-oblivious configure option, and queried via + the "config.cache_oblivious" mallctl. + - Micro-optimize the fast paths for the public API functions. + - Refactor thread-specific data to reside in a single structure. This assures + that only a single TLS read is necessary per call into the public API. + - Implement in-place huge allocation growing and shrinking. + - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make + additional optimizations that reduce maximum lookup depth to one or two + levels. This resolves what was a concurrency bottleneck for per arena huge + allocation, because a global data structure is critical for determining + which arenas own which huge allocations. + + Incompatible changes: + - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious + warnings by default. + - Assure that the constness of malloc_usable_size()'s return type matches that + of the system implementation. + - Change the heap profile dump format to support per thread heap profiling, + rename pprof to jeprof, and enhance it with the --thread=<n> option. As a + result, the bundled jeprof must now be used rather than the upstream + (gperftools) pprof. + - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can + internally deadlock on some platforms. + - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
+ - Replace the "stats.arenas..bins..allocated" mallctl with + "stats.arenas..bins..curregs". + - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. + - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. + + Removed features: + - Remove the *allocm() API, which is superseded by the *allocx() API. + - Remove the --enable-dss options, and make dss non-optional on all platforms + which support sbrk(2). + - Remove the "arenas.purge" mallctl, which was obsoleted by the + "arena..purge" mallctl in 3.1.0. + - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically + detects whether it is running inside Valgrind. + - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and + "stats.huge.ndalloc" mallctls. + - Remove the --enable-mremap option. + - Remove the "stats.chunks.current", "stats.chunks.total", and + "stats.chunks.high" mallctls. + + Bug fixes: + - Fix the cactive statistic to decrease (rather than increase) when active + memory decreases. This regression was first released in 3.5.0. + - Fix OOM handling in memalign() and valloc(). A variant of this bug existed + in all releases since 2.0.0, which introduced these functions. + - Fix an OOM-related regression in arena_tcache_fill_small(), which could + cause cache corruption on OOM. This regression was present in all releases + from 2.2.0 through 3.6.0. + - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), + calloc(), and realloc() when profiling is enabled. + - Fix the "arena..dss" mallctl to return an error if "primary" or + "secondary" precedence is specified, but sbrk(2) is not supported. + - Fix fallback lg_floor() implementations to handle extremely large inputs. + - Ensure the default purgeable zone is after the default zone on OS X. + - Fix latent bugs in atomic_*(). + - Fix the "arena..dss" mallctl to handle read-only calls. + - Fix tls_model configuration to enable the initial-exec model when possible. + - Mark malloc_conf as a weak symbol so that the application can override it. + - Correctly detect glibc's adaptive pthread mutexes. + - Fix the --without-export configure option. + +* 3.6.0 (March 31, 2014) + + This version contains a critical bug fix for a regression present in 3.5.0 and + 3.5.1. + + Bug fixes: + - Fix a regression in arena_chunk_alloc() that caused crashes during + small/large allocation if chunk allocation failed. In the absence of this + bug, chunk allocation failure would result in allocation failure, e.g. NULL + return from malloc(). This regression was introduced in 3.5.0. + - Fix backtracing for gcc intrinsics-based backtracing by specifying + -fno-omit-frame-pointer to gcc. Note that the application (and all the + libraries it links to) must also be compiled with this option for + backtracing to be reliable. + - Use dss allocation precedence for huge allocations as well as small/large + allocations. + - Fix test assertion failure message formatting. This bug did not manifest on + x86_64 systems because of implementation subtleties in va_list. + - Fix inconsequential test failures for hash and SFMT code. + + New features: + - Support heap profiling on FreeBSD. This feature depends on the proc + filesystem being mounted during heap profile dumping. + +* 3.5.1 (February 25, 2014) + + This version primarily addresses minor bugs in test code. + + Bug fixes: + - Configure Solaris/Illumos to use MADV_FREE. + - Fix junk filling for mremap(2)-based huge reallocation. 
This is only + relevant if configuring with the --enable-mremap option specified. + - Avoid compilation failure if 'restrict' C99 keyword is not supported by the + compiler. + - Add a configure test for SSE2 rather than assuming it is usable on i686 + systems. This fixes test compilation errors, especially on 32-bit Linux + systems. + - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit + test. + - Fix/remove flawed alignment-related overflow tests. + - Prevent compiler optimizations that could change backtraces in the + prof_accum unit test. + +* 3.5.0 (January 22, 2014) + + This version focuses on refactoring and automated testing, though it also + includes some non-trivial heap profiling optimizations not mentioned below. + + New features: + - Add the *allocx() API, which is a successor to the experimental *allocm() + API. The *allocx() functions are slightly simpler to use because they have + fewer parameters, they directly return the results of primary interest, and + mallocx()/rallocx() avoid the strict aliasing pitfall that + allocm()/rallocm() share with posix_memalign(). Note that *allocm() is + slated for removal in the next non-bugfix release. + - Add support for LinuxThreads. + + Bug fixes: + - Unless heap profiling is enabled, disable floating point code and don't link + with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64 + systems, makes it possible to completely disable floating point register + use. Some versions of glibc neglect to save/restore caller-saved floating + point registers during dynamic lazy symbol loading, and the symbol loading + code uses whatever malloc the application happens to have linked/loaded + with, the result being potential floating point register corruption. + - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling + backtrace creation in imemalign(). This bug impacted posix_memalign() and + aligned_alloc(). + - Fix a file descriptor leak in a prof_dump_maps() error path. + - Fix prof_dump() to close the dump file descriptor for all relevant error + paths. + - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for + allocation, not just deallocation. + - Fix a data race for large allocation stats counters. + - Fix a potential infinite loop during thread exit. This bug occurred on + Solaris, and could affect other platforms with similar pthreads TSD + implementations. + - Don't junk-fill reallocations unless usable size changes. This fixes a + violation of the *allocx()/*allocm() semantics. + - Fix growing large reallocation to junk fill new space. + - Fix huge deallocation to junk fill when munmap is disabled. + - Change the default private namespace prefix from empty to je_, and change + --with-private-namespace-prefix so that it prepends an additional prefix + rather than replacing je_. This reduces the likelihood of applications + which statically link jemalloc experiencing symbol name collisions. + - Add missing private namespace mangling (relevant when + --with-private-namespace is specified). + - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as + static even for debug builds. + - Add a missing mutex unlock in a malloc_init_hard() error path. In practice + this error path is never executed. + - Fix numerous bugs in malloc_strotumax() error handling/reporting. These + bugs had no impact except for malformed inputs. + - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by + existing calls, so they had no impact. 
+ +* 3.4.1 (October 20, 2013) + + Bug fixes: + - Fix a race in the "arenas.extend" mallctl that could cause memory corruption + of internal data structures and subsequent crashes. + - Fix Valgrind integration flaws that caused Valgrind warnings about reads of + uninitialized memory in: + + arena chunk headers + + internal zero-initialized data structures (relevant to tcache and prof + code) + - Preserve errno during the first allocation. A readlink(2) call during + initialization fails unless /etc/malloc.conf exists, so errno was typically + set during the first allocation prior to this fix. + - Fix compilation warnings reported by gcc 4.8.1. + +* 3.4.0 (June 2, 2013) + + This version is essentially a small bugfix release, but the addition of + aarch64 support requires that the minor version be incremented. + + Bug fixes: + - Fix race-triggered deadlocks in chunk_record(). These deadlocks were + typically triggered by multiple threads concurrently deallocating huge + objects. + + New features: + - Add support for the aarch64 architecture. + +* 3.3.1 (March 6, 2013) + + This version fixes bugs that are typically encountered only when utilizing + custom run-time options. + + Bug fixes: + - Fix a locking order bug that could cause deadlock during fork if heap + profiling were enabled. + - Fix a chunk recycling bug that could cause the allocator to lose track of + whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause + corruption if allocating via sbrk(2) (unlikely unless running with the + "dss:primary" option specified). This was completely harmless on Linux + unless using mlockall(2) (and unlikely even then, unless the + --disable-munmap configure option or the "dss:primary" option was + specified). This regression was introduced in 3.1.0 by the + mlockall(2)/madvise(2) interaction fix. + - Fix TLS-related memory corruption that could occur during thread exit if the + thread never allocated memory. Only the quarantine and prof facilities were + susceptible. + - Fix two quarantine bugs: + + Internal reallocation of the quarantined object array leaked the old + array. + + Reallocation failure for internal reallocation of the quarantined object + array (very unlikely) resulted in memory corruption. + - Fix Valgrind integration to annotate all internally allocated memory in a + way that keeps Valgrind happy about internal data structure access. + - Fix building for s390 systems. + +* 3.3.0 (January 23, 2013) + + This version includes a few minor performance improvements in addition to the + listed new features and bug fixes. + + New features: + - Add clipping support to lg_chunk option processing. + - Add the --enable-ivsalloc option. + - Add the --without-export option. + - Add the --disable-zone-allocator option. + + Bug fixes: + - Fix "arenas.extend" mallctl to output the number of arenas. + - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory + is undefined. + - Fix build break on FreeBSD related to alloca.h. + +* 3.2.0 (November 9, 2012) + + In addition to a couple of bug fixes, this version modifies page run + allocation and dirty page purging algorithms in order to better control + page-level virtual memory fragmentation. + + Incompatible changes: + - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1). + + Bug fixes: + - Fix dss/mmap allocation precedence code to use recyclable mmap memory only + after primary dss allocation fails. + - Fix deadlock in the "arenas.purge" mallctl. 
This regression was introduced + in 3.1.0 by the addition of the "arena..purge" mallctl. + +* 3.1.0 (October 16, 2012) + + New features: + - Auto-detect whether running inside Valgrind, thus removing the need to + manually specify MALLOC_CONF=valgrind:true. + - Add the "arenas.extend" mallctl, which allows applications to create + manually managed arenas. + - Add the ALLOCM_ARENA() flag for {,r,d}allocm(). + - Add the "opt.dss", "arena..dss", and "stats.arenas..dss" mallctls, + which provide control over dss/mmap precedence. + - Add the "arena..purge" mallctl, which obsoletes "arenas.purge". + - Define LG_QUANTUM for hppa. + + Incompatible changes: + - Disable tcache by default if running inside Valgrind, in order to avoid + making unallocated objects appear reachable to Valgrind. + - Drop const from malloc_usable_size() argument on Linux. + + Bug fixes: + - Fix heap profiling crash if sampled object is freed via realloc(p, 0). + - Remove const from __*_hook variable declarations, so that glibc can modify + them during process forking. + - Fix mlockall(2)/madvise(2) interaction. + - Fix fork(2)-related deadlocks. + - Fix error return value for "thread.tcache.enabled" mallctl. + +* 3.0.0 (May 11, 2012) + + Although this version adds some major new features, the primary focus is on + internal code cleanup that facilitates maintainability and portability, most + of which is not reflected in the ChangeLog. This is the first release to + incorporate substantial contributions from numerous other developers, and the + result is a more broadly useful allocator (see the git revision history for + contribution details). Note that the license has been unified, thanks to + Facebook granting a license under the same terms as the other copyright + holders (see COPYING). + + New features: + - Implement Valgrind support, redzones, and quarantine. + - Add support for additional platforms: + + FreeBSD + + Mac OS X Lion + + MinGW + + Windows (no support yet for replacing the system malloc) + - Add support for additional architectures: + + MIPS + + SH4 + + Tilera + - Add support for cross compiling. + - Add nallocm(), which rounds a request size up to the nearest size class + without actually allocating. + - Implement aligned_alloc() (blame C11). + - Add the "thread.tcache.enabled" mallctl. + - Add the "opt.prof_final" mallctl. + - Update pprof (from gperftools 2.0). + - Add the --with-mangling option. + - Add the --disable-experimental option. + - Add the --disable-munmap option, and make it the default on Linux. + - Add the --enable-mremap option, which disables use of mremap(2) by default. + + Incompatible changes: + - Enable stats by default. + - Enable fill by default. + - Disable lazy locking by default. + - Rename the "tcache.flush" mallctl to "thread.tcache.flush". + - Rename the "arenas.pagesize" mallctl to "arenas.page". + - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB). + - Change the "opt.prof_accum" default from true to false. + + Removed features: + - Remove the swap feature, including the "config.swap", "swap.avail", + "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls. + - Remove highruns statistics, including the + "stats.arenas..bins..highruns" and + "stats.arenas..lruns..highruns" mallctls. + - As part of small size class refactoring, remove the "opt.lg_[qc]space_max", + "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and + "arenas.[tqcs]bins" mallctls. + - Remove the "arenas.chunksize" mallctl. + - Remove the "opt.lg_prof_tcmax" option. 
+ - Remove the "opt.lg_prof_bt_max" option. + - Remove the "opt.lg_tcache_gc_sweep" option. + - Remove the --disable-tiny option, including the "config.tiny" mallctl. + - Remove the --enable-dynamic-page-shift configure option. + - Remove the --enable-sysv configure option. + + Bug fixes: + - Fix a statistics-related bug in the "thread.arena" mallctl that could cause + invalid statistics and crashes. + - Work around TLS deallocation via free() on Linux. This bug could cause + write-after-free memory corruption. + - Fix a potential deadlock that could occur during interval- and + growth-triggered heap profile dumps. + - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags. + - Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could + cause memory corruption and crashes with --enable-dss specified. + - Fix fork-related bugs that could cause deadlock in children between fork + and exec. + - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter. + - Fix realloc(p, 0) to act like free(p). + - Do not enforce minimum alignment in memalign(). + - Check for NULL pointer in malloc_usable_size(). + - Fix an off-by-one heap profile statistics bug that could be observed in + interval- and growth-triggered heap profiles. + - Fix the "epoch" mallctl to update cached stats even if the passed in epoch + is 0. + - Fix bin->runcur management to fix a layout policy bug. This bug did not + affect correctness. + - Fix a bug in choose_arena_hard() that potentially caused more arenas to be + initialized than necessary. + - Add missing "opt.lg_tcache_max" mallctl implementation. + - Use glibc allocator hooks to make mixed allocator usage less likely. + - Fix build issues for --disable-tcache. + - Don't mangle pthread_create() when --with-private-namespace is specified. + +* 2.2.5 (November 14, 2011) + + Bug fixes: + - Fix huge_ralloc() race when using mremap(2). This is a serious bug that + could cause memory corruption and/or crashes. + - Fix huge_ralloc() to maintain chunk statistics. + - Fix malloc_stats_print(..., "a") output. + +* 2.2.4 (November 5, 2011) + + Bug fixes: + - Initialize arenas_tsd before using it. This bug existed for 2.2.[0-3], as + well as for --disable-tls builds in earlier releases. + - Do not assume a 4 KiB page size in test/rallocm.c. + +* 2.2.3 (August 31, 2011) + + This version fixes numerous bugs related to heap profiling. + + Bug fixes: + - Fix a prof-related race condition. This bug could cause memory corruption, + but only occurred in non-default configurations (prof_accum:false). + - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is + excluded from backtraces). + - Fix a prof-related bug in realloc() (only triggered by OOM errors). + - Fix prof-related bugs in allocm() and rallocm(). + - Fix prof_tdata_cleanup() for --disable-tls builds. + - Fix a relative include path, to fix objdir builds. + +* 2.2.2 (July 30, 2011) + + Bug fixes: + - Fix a build error for --disable-tcache. + - Fix assertions in arena_purge() (for real this time). + - Add the --with-private-namespace option. This is a workaround for symbol + conflicts that can inadvertently arise when using static libraries. + +* 2.2.1 (March 30, 2011) + + Bug fixes: + - Implement atomic operations for x86/x64. This fixes compilation failures + for versions of gcc that are still in wide use. + - Fix an assertion in arena_purge(). 
+ +* 2.2.0 (March 22, 2011) + + This version incorporates several improvements to algorithms and data + structures that tend to reduce fragmentation and increase speed. + + New features: + - Add the "stats.cactive" mallctl. + - Update pprof (from google-perftools 1.7). + - Improve backtracing-related configuration logic, and add the + --disable-prof-libgcc option. + + Bug fixes: + - Change default symbol visibility from "internal", to "hidden", which + decreases the overhead of library-internal function calls. + - Fix symbol visibility so that it is also set on OS X. + - Fix a build dependency regression caused by the introduction of the .pic.o + suffix for PIC object files. + - Add missing checks for mutex initialization failures. + - Don't use libgcc-based backtracing except on x64, where it is known to work. + - Fix deadlocks on OS X that were due to memory allocation in + pthread_mutex_lock(). + - Heap profiling-specific fixes: + + Fix memory corruption due to integer overflow in small region index + computation, when using a small enough sample interval that profiling + context pointers are stored in small run headers. + + Fix a bootstrap ordering bug that only occurred with TLS disabled. + + Fix a rallocm() rsize bug. + + Fix error detection bugs for aligned memory allocation. + +* 2.1.3 (March 14, 2011) + + Bug fixes: + - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix + for OS X in 2.1.2). + - Fix a "thread.arena" mallctl bug. + - Fix a thread cache stats merging bug. + +* 2.1.2 (March 2, 2011) + + Bug fixes: + - Fix "thread.{de,}allocatedp" mallctl for OS X. + - Add missing jemalloc.a to build system. + +* 2.1.1 (January 31, 2011) + + Bug fixes: + - Fix aligned huge reallocation (affected allocm()). + - Fix the ALLOCM_LG_ALIGN macro definition. + - Fix a heap dumping deadlock. + - Fix a "thread.arena" mallctl bug. + +* 2.1.0 (December 3, 2010) + + This version incorporates some optimizations that can't quite be considered + bug fixes. + + New features: + - Use Linux's mremap(2) for huge object reallocation when possible. + - Avoid locking in mallctl*() when possible. + - Add the "thread.[de]allocatedp" mallctl's. + - Convert the manual page source from roff to DocBook, and generate both roff + and HTML manuals. + + Bug fixes: + - Fix a crash due to incorrect bootstrap ordering. This only impacted + --enable-debug --enable-dss configurations. + - Fix a minor statistics bug for mallctl("swap.avail", ...). + +* 2.0.1 (October 29, 2010) + + Bug fixes: + - Fix a race condition in heap profiling that could cause undefined behavior + if "opt.prof_accum" were disabled. + - Add missing mutex unlocks for some OOM error paths in the heap profiling + code. + - Fix a compilation error for non-C99 builds. + +* 2.0.0 (October 24, 2010) + + This version focuses on the experimental *allocm() API, and on improved + run-time configuration/introspection. Nonetheless, numerous performance + improvements are also included. + + New features: + - Implement the experimental {,r,s,d}allocm() API, which provides a superset + of the functionality available via malloc(), calloc(), posix_memalign(), + realloc(), malloc_usable_size(), and free(). These functions can be used to + allocate/reallocate aligned zeroed memory, ask for optional extra memory + during reallocation, prevent object movement during reallocation, etc. + - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is + more human-readable, and more flexible. 
For example: + JEMALLOC_OPTIONS=AJP + is now: + MALLOC_CONF=abort:true,fill:true,stats_print:true + - Port to Apple OS X. Sponsored by Mozilla. + - Make it possible for the application to control thread-->arena mappings via + the "thread.arena" mallctl. + - Add compile-time support for all TLS-related functionality via pthreads TSD. + This is mainly of interest for OS X, which does not support TLS, but has a + TSD implementation with similar performance. + - Override memalign() and valloc() if they are provided by the system. + - Add the "arenas.purge" mallctl, which can be used to synchronously purge all + dirty unused pages. + - Make cumulative heap profiling data optional, so that it is possible to + limit the amount of memory consumed by heap profiling data structures. + - Add per thread allocation counters that can be accessed via the + "thread.allocated" and "thread.deallocated" mallctls. + + Incompatible changes: + - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above). + - Increase default backtrace depth from 4 to 128 for heap profiling. + - Disable interval-based profile dumps by default. + + Bug fixes: + - Remove bad assertions in fork handler functions. These assertions could + cause aborts for some combinations of configure settings. + - Fix strerror_r() usage to deal with non-standard semantics in GNU libc. + - Fix leak context reporting. This bug tended to cause the number of contexts + to be underreported (though the reported number of objects and bytes were + correct). + - Fix a realloc() bug for large in-place growing reallocation. This bug could + cause memory corruption, but it was hard to trigger. + - Fix an allocation bug for small allocations that could be triggered if + multiple threads raced to create a new run of backing pages. + - Enhance the heap profiler to trigger samples based on usable size, rather + than request size. + - Fix a heap profiling bug due to sometimes losing track of requested object + size for sampled objects. + +* 1.0.3 (August 12, 2010) + + Bug fixes: + - Fix the libunwind-based implementation of stack backtracing (used for heap + profiling). This bug could cause zero-length backtraces to be reported. + - Add a missing mutex unlock in library initialization code. If multiple + threads raced to initialize malloc, some of them could end up permanently + blocked. + +* 1.0.2 (May 11, 2010) + + Bug fixes: + - Fix junk filling of large objects, which could cause memory corruption. + - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual + memory limits could cause swap file configuration to fail. Contributed by + Jordan DeLong. + +* 1.0.1 (April 14, 2010) + + Bug fixes: + - Fix compilation when --enable-fill is specified. + - Fix threads-related profiling bugs that affected accuracy and caused memory + to be leaked during thread exit. + - Fix dirty page purging race conditions that could cause crashes. + - Fix crash in tcache flushing code during thread destruction. + +* 1.0.0 (April 11, 2010) + + This release focuses on speed and run-time introspection. Numerous + algorithmic improvements make this release substantially faster than its + predecessors. + + New features: + - Implement autoconf-based configuration system. + - Add mallctl*(), for the purposes of introspection and run-time + configuration. + - Make it possible for the application to manually flush a thread's cache, via + the "tcache.flush" mallctl. + - Base maximum dirty page count on proportion of active memory. 
+ - Compute various additional run-time statistics, including per size class + statistics for large objects. + - Expose malloc_stats_print(), which can be called repeatedly by the + application. + - Simplify the malloc_message() signature to only take one string argument, + and incorporate an opaque data pointer argument for use by the application + in combination with malloc_stats_print(). + - Add support for allocation backed by one or more swap files, and allow the + application to disable over-commit if swap files are in use. + - Implement allocation profiling and leak checking. + + Removed features: + - Remove the dynamic arena rebalancing code, since thread-specific caching + reduces its utility. + + Bug fixes: + - Modify chunk allocation to work when address space layout randomization + (ASLR) is in use. + - Fix thread cleanup bugs related to TLS destruction. + - Handle 0-size allocation requests in posix_memalign(). + - Fix a chunk leak. The leaked chunks were never touched, so this impacted + virtual memory usage, but not physical memory usage. + +* linux_2008082[78]a (August 27/28, 2008) + + These snapshot releases are the simple result of incorporating Linux-specific + support into the FreeBSD malloc sources. + +-------------------------------------------------------------------------------- +vim:filetype=text:textwidth=80 diff --git a/deps/jemalloc/INSTALL.md b/deps/jemalloc/INSTALL.md new file mode 100644 index 0000000..b8f729b --- /dev/null +++ b/deps/jemalloc/INSTALL.md @@ -0,0 +1,421 @@ +Building and installing a packaged release of jemalloc can be as simple as +typing the following while in the root directory of the source tree: + + ./configure + make + make install + +If building from unpackaged developer sources, the simplest command sequence +that might work is: + + ./autogen.sh + make dist + make + make install + +Note that documentation is not built by the default target because doing so +would create a dependency on xsltproc in packaged releases, hence the +requirement to either run 'make dist' or avoid installing docs via the various +install_* targets documented below. + + +## Advanced configuration + +The 'configure' script supports numerous options that allow control of which +functionality is enabled, where jemalloc is installed, etc. Optionally, pass +any of the following arguments (not a definitive list) to 'configure': + +* `--help` + + Print a definitive list of options. + +* `--prefix=` + + Set the base directory in which to install. For example: + + ./configure --prefix=/usr/local + + will cause files to be installed into /usr/local/include, /usr/local/lib, + and /usr/local/man. + +* `--with-version=(..--g|VERSION)` + + The VERSION file is mandatory for successful configuration, and the + following steps are taken to assure its presence: + 1) If --with-version=..--g is specified, + generate VERSION using the specified value. + 2) If --with-version is not specified in either form and the source + directory is inside a git repository, try to generate VERSION via 'git + describe' invocations that pattern-match release tags. + 3) If VERSION is missing, generate it with a bogus version: + 0.0.0-0-g0000000000000000000000000000000000000000 + + Note that --with-version=VERSION bypasses (1) and (2), which simplifies + VERSION configuration when embedding a jemalloc release into another + project's git repository. + +* `--with-rpath=` + + Embed one or more library paths, so that libjemalloc can find the libraries + it is linked to. 
This works only on ELF-based systems.
+
+* `--with-mangling=<map>`
+
+ Mangle public symbols specified in <map> which is a comma-separated list of
+ name:mangled pairs.
+
+ For example, to use ld's --wrap option as an alternative method for
+ overriding libc's malloc implementation, specify something like:
+
+ --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
+
+ Note that mangling happens prior to application of the prefix specified by
+ --with-jemalloc-prefix, and mangled symbols are then ignored when applying
+ the prefix.
+
+* `--with-jemalloc-prefix=<prefix>`
+
+ Prefix all public APIs with <prefix>. For example, if <prefix> is
+ "prefix_", API changes like the following occur:
+
+ malloc() --> prefix_malloc()
+ malloc_conf --> prefix_malloc_conf
+ /etc/malloc.conf --> /etc/prefix_malloc.conf
+ MALLOC_CONF --> PREFIX_MALLOC_CONF
+
+ This makes it possible to use jemalloc at the same time as the system
+ allocator, or even to use multiple copies of jemalloc simultaneously.
+
+ By default, the prefix is "", except on OS X, where it is "je_". On OS X,
+ jemalloc overlays the default malloc zone, but makes no attempt to actually
+ replace the "malloc", "calloc", etc. symbols.
+
+* `--without-export`
+
+ Don't export public APIs. This can be useful when building jemalloc as a
+ static library, or to avoid exporting public APIs when using the zone
+ allocator on OSX.
+
+* `--with-private-namespace=<prefix>`
+
+ Prefix all library-private APIs with <prefix>je_. For shared libraries,
+ symbol visibility mechanisms prevent these symbols from being exported, but
+ for static libraries, naming collisions are a real possibility. By
+ default, <prefix> is empty, which results in a symbol prefix of je_ .
+
+* `--with-install-suffix=<suffix>`
+
+ Append <suffix> to the base name of all installed files, such that multiple
+ versions of jemalloc can coexist in the same installation directory. For
+ example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
+
+* `--with-malloc-conf=<malloc_conf>`
+
+ Embed `<malloc_conf>` as a run-time options string that is processed prior to
+ the malloc_conf global variable, the /etc/malloc.conf symlink, and the
+ MALLOC_CONF environment variable. For example, to change the default decay
+ time to 30 seconds:
+
+ --with-malloc-conf=decay_ms:30000
+
+* `--enable-debug`
+
+ Enable assertions and validation code. This incurs a substantial
+ performance hit, but is very useful during application development.
+
+* `--disable-stats`
+
+ Disable statistics gathering functionality. See the "opt.stats_print"
+ option documentation for usage details.
+
+* `--enable-prof`
+
+ Enable heap profiling and leak detection functionality. See the "opt.prof"
+ option documentation for usage details. When enabled, there are several
+ approaches to backtracing, and the configure script chooses the first one
+ in the following list that appears to function correctly:
+
+ + libunwind (requires --enable-prof-libunwind)
+ + libgcc (unless --disable-prof-libgcc)
+ + gcc intrinsics (unless --disable-prof-gcc)
+
+* `--enable-prof-libunwind`
+
+ Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
+ backtracing.
+
+* `--disable-prof-libgcc`
+
+ Disable the use of libgcc's backtracing functionality.
+
+* `--disable-prof-gcc`
+
+ Disable the use of gcc intrinsics for backtracing.
+
+* `--with-static-libunwind=<libunwind.a>`
+
+ Statically link against the specified libunwind.a rather than dynamically
+ linking with -lunwind.
+
+* `--disable-fill`
+
+ Disable support for junk/zero filling of memory. See the "opt.junk" and
+ "opt.zero" option documentation for usage details.
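+
+ For example (an illustrative command that is not part of the upstream text;
+ "./app" stands in for any program, and the default empty API prefix is
+ assumed), junk filling remains controllable at run time as long as fill
+ support is compiled in:
+
+ MALLOC_CONF=junk:true ./app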
+
+* `--disable-zone-allocator`
+
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ the default allocator on OSX/iOS.
+
+* `--enable-utrace`
+
+ Enable utrace(2)-based allocation tracing. This feature is not broadly
+ portable (FreeBSD has it, but Linux and OS X do not).
+
+* `--enable-xmalloc`
+
+ Enable support for optional immediate termination due to out-of-memory
+ errors, as is commonly implemented by an "xmalloc" wrapper function for
+ malloc. See the "opt.xmalloc" option documentation for usage details.
+
+* `--enable-lazy-lock`
+
+ Enable code that wraps pthread_create() to detect when an application
+ switches from single-threaded to multi-threaded mode, so that it can avoid
+ mutex locking/unlocking operations while in single-threaded mode. In
+ practice, this feature usually has little impact on performance unless
+ thread-specific caching is disabled.
+
+* `--disable-cache-oblivious`
+
+ Disable cache-oblivious large allocation alignment for large allocation
+ requests with no alignment constraints. If this feature is disabled, all
+ large allocations are page-aligned as an implementation artifact, which can
+ severely harm CPU cache utilization. However, the cache-oblivious layout
+ comes at the cost of one extra page per large allocation, which in the
+ most extreme case increases physical memory usage for the 16 KiB size class
+ to 20 KiB.
+
+* `--disable-syscall`
+
+ Disable use of syscall(2) rather than {open,read,write,close}(2). This is
+ intended as a workaround for systems that place security limitations on
+ syscall(2).
+
+* `--disable-cxx`
+
+ Disable C++ integration. This will cause new and delete operator
+ implementations to be omitted.
+
+* `--with-xslroot=<path>`
+
+ Specify where to find DocBook XSL stylesheets when building the
+ documentation.
+
+* `--with-lg-page=<lg-page>`
+
+ Specify the base 2 log of the allocator page size, which must in turn be at
+ least as large as the system page size. By default the configure script
+ determines the host's page size and sets the allocator page size equal to
+ the system page size, so this option need not be specified unless the
+ system page size may change between configuration and execution, e.g. when
+ cross compiling.
+
+* `--with-lg-hugepage=<lg-hugepage>`
+
+ Specify the base 2 log of the system huge page size. This option is useful
+ when cross compiling, or when overriding the default for systems that do
+ not explicitly support huge pages.
+
+* `--with-lg-quantum=<lg-quantum>`
+
+ Specify the base 2 log of the minimum allocation alignment. jemalloc needs
+ to know the minimum alignment that meets the following C standard
+ requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+ > The pointer returned if the allocation succeeds is suitably aligned so
+ that it may be assigned to a pointer to any type of object with a
+ fundamental alignment requirement and then used to access such an object
+ or an array of such objects in the space allocated [...]
+
+ This setting is architecture-specific, and although jemalloc includes known
+ safe values for the most commonly used modern architectures, there is a
+ wrinkle related to GNU libc (glibc) that may impact your choice of
+ <lg-quantum>. On most modern architectures, this mandates 16-byte
+ alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
+ requirement for performance reasons. An old discussion can be found at
+ .
Unlike glibc, + jemalloc does follow the C standard by default (caveat: jemalloc + technically cheats for size classes smaller than the quantum), but the fact + that Linux systems already work around this allocator noncompliance means + that it is generally safe in practice to let jemalloc's minimum alignment + follow glibc's lead. If you specify `--with-lg-quantum=3` during + configuration, jemalloc will provide additional size classes that are not + 16-byte-aligned (24, 40, and 56). + +* `--with-lg-vaddr=` + + Specify the number of significant virtual address bits. By default, the + configure script attempts to detect virtual address size on those platforms + where it knows how, and picks a default otherwise. This option may be + useful when cross-compiling. + +* `--disable-initial-exec-tls` + + Disable the initial-exec TLS model for jemalloc's internal thread-local + storage (on those platforms that support explicit settings). This can allow + jemalloc to be dynamically loaded after program startup (e.g. using dlopen). + Note that in this case, there will be two malloc implementations operating + in the same process, which will almost certainly result in confusing runtime + crashes if pointers leak from one implementation to the other. + +* `--disable-libdl` + + Disable the usage of libdl, namely dlsym(3) which is required by the lazy + lock option. This can allow building static binaries. + +The following environment variables (not a definitive list) impact configure's +behavior: + +* `CFLAGS="?"` +* `CXXFLAGS="?"` + + Pass these flags to the C/C++ compiler. Any flags set by the configure + script are prepended, which means explicitly set flags generally take + precedence. Take care when specifying flags such as -Werror, because + configure tests may be affected in undesirable ways. + +* `EXTRA_CFLAGS="?"` +* `EXTRA_CXXFLAGS="?"` + + Append these flags to CFLAGS/CXXFLAGS, without passing them to the + compiler(s) during configuration. This makes it possible to add flags such + as -Werror, while allowing the configure script to determine what other + flags are appropriate for the specified configuration. + +* `CPPFLAGS="?"` + + Pass these flags to the C preprocessor. Note that CFLAGS is not passed to + 'cpp' when 'configure' is looking for include files, so you must use + CPPFLAGS instead if you need to help 'configure' find header files. + +* `LD_LIBRARY_PATH="?"` + + 'ld' uses this colon-separated list to find libraries. + +* `LDFLAGS="?"` + + Pass these flags when linking. + +* `PATH="?"` + + 'configure' uses this to find programs. + +In some cases it may be necessary to work around configuration results that do +not match reality. For example, Linux 4.5 added support for the MADV_FREE flag +to madvise(2), which can cause problems if building on a host with MADV_FREE +support and deploying to a target without. 
To work around this, use a cache +file to override the relevant configuration variable defined in configure.ac, +e.g.: + + echo "je_cv_madv_free=no" > config.cache && ./configure -C + + +## Advanced compilation + +To build only parts of jemalloc, use the following targets: + + build_lib_shared + build_lib_static + build_lib + build_doc_html + build_doc_man + build_doc + +To install only parts of jemalloc, use the following targets: + + install_bin + install_include + install_lib_shared + install_lib_static + install_lib_pc + install_lib + install_doc_html + install_doc_man + install_doc + +To clean up build results to varying degrees, use the following make targets: + + clean + distclean + relclean + + +## Advanced installation + +Optionally, define make variables when invoking make, including (not +exclusively): + +* `INCLUDEDIR="?"` + + Use this as the installation prefix for header files. + +* `LIBDIR="?"` + + Use this as the installation prefix for libraries. + +* `MANDIR="?"` + + Use this as the installation prefix for man pages. + +* `DESTDIR="?"` + + Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful + when installing to a different path than was specified via --prefix. + +* `CC="?"` + + Use this to invoke the C compiler. + +* `CFLAGS="?"` + + Pass these flags to the compiler. + +* `CPPFLAGS="?"` + + Pass these flags to the C preprocessor. + +* `LDFLAGS="?"` + + Pass these flags when linking. + +* `PATH="?"` + + Use this to search for programs used during configuration and building. + + +## Development + +If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' +script rather than 'configure'. This re-generates 'configure', enables +configuration dependency rules, and enables re-generation of automatically +generated source files. + +The build system supports using an object directory separate from the source +tree. For example, you can create an 'obj' directory, and from within that +directory, issue configuration and build commands: + + autoconf + mkdir obj + cd obj + ../configure --enable-autogen + make + + +## Documentation + +The manual page is generated in both html and roff formats. Any web browser +can be used to view the html manual. The roff manual page can be formatted +prior to installation via the following command: + + nroff -man -t doc/jemalloc.3 diff --git a/deps/jemalloc/Makefile.in b/deps/jemalloc/Makefile.in new file mode 100644 index 0000000..7128b00 --- /dev/null +++ b/deps/jemalloc/Makefile.in @@ -0,0 +1,623 @@ +# Clear out all vpaths, then set just one (default vpath) for the main build +# directory. +vpath +vpath % . + +# Clear the default suffixes, so that built-in rules are not used. +.SUFFIXES : + +SHELL := /bin/sh + +CC := @CC@ +CXX := @CXX@ + +# Configuration parameters. +DESTDIR = +BINDIR := $(DESTDIR)@BINDIR@ +INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@ +LIBDIR := $(DESTDIR)@LIBDIR@ +DATADIR := $(DESTDIR)@DATADIR@ +MANDIR := $(DESTDIR)@MANDIR@ +srcroot := @srcroot@ +objroot := @objroot@ +abs_srcroot := @abs_srcroot@ +abs_objroot := @abs_objroot@ + +# Build parameters. 
+CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include +CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@ +SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@ +EXTRA_CFLAGS := @EXTRA_CFLAGS@ +CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS)) +CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@ +SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@ +EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@ +CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS)) +LDFLAGS := @LDFLAGS@ +EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ +LIBS := @LIBS@ +RPATH_EXTRA := @RPATH_EXTRA@ +SO := @so@ +IMPORTLIB := @importlib@ +O := @o@ +A := @a@ +EXE := @exe@ +LIBPREFIX := @libprefix@ +REV := @rev@ +install_suffix := @install_suffix@ +ABI := @abi@ +XSLTPROC := @XSLTPROC@ +XSLROOT := @XSLROOT@ +AUTOCONF := @AUTOCONF@ +_RPATH = @RPATH@ +RPATH = $(if $(1),$(call _RPATH,$(1))) +cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@) +cfghdrs_out := @cfghdrs_out@ +cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) +cfgoutputs_out := @cfgoutputs_out@ +enable_autogen := @enable_autogen@ +enable_doc := @enable_doc@ +enable_shared := @enable_shared@ +enable_static := @enable_static@ +enable_prof := @enable_prof@ +enable_zone_allocator := @enable_zone_allocator@ +enable_experimental_smallocx := @enable_experimental_smallocx@ +MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF +link_whole_archive := @link_whole_archive@ +DSO_LDFLAGS = @DSO_LDFLAGS@ +SOREV = @SOREV@ +PIC_CFLAGS = @PIC_CFLAGS@ +CTARGET = @CTARGET@ +LDTARGET = @LDTARGET@ +TEST_LD_MODE = @TEST_LD_MODE@ +MKLIB = @MKLIB@ +AR = @AR@ +ARFLAGS = @ARFLAGS@ +DUMP_SYMS = @DUMP_SYMS@ +AWK := @AWK@ +CC_MM = @CC_MM@ +LM := @LM@ +INSTALL = @INSTALL@ + +ifeq (macho, $(ABI)) +TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" +else +ifeq (pecoff, $(ABI)) +TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" +else +TEST_LIBRARY_PATH := +endif +endif + +LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) + +# Lists of files. 
+BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof +C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h +C_SRCS := $(srcroot)src/jemalloc.c \ + $(srcroot)src/arena.c \ + $(srcroot)src/background_thread.c \ + $(srcroot)src/base.c \ + $(srcroot)src/bin.c \ + $(srcroot)src/bitmap.c \ + $(srcroot)src/ckh.c \ + $(srcroot)src/ctl.c \ + $(srcroot)src/div.c \ + $(srcroot)src/extent.c \ + $(srcroot)src/extent_dss.c \ + $(srcroot)src/extent_mmap.c \ + $(srcroot)src/hash.c \ + $(srcroot)src/hook.c \ + $(srcroot)src/large.c \ + $(srcroot)src/log.c \ + $(srcroot)src/malloc_io.c \ + $(srcroot)src/mutex.c \ + $(srcroot)src/mutex_pool.c \ + $(srcroot)src/nstime.c \ + $(srcroot)src/pages.c \ + $(srcroot)src/prng.c \ + $(srcroot)src/prof.c \ + $(srcroot)src/rtree.c \ + $(srcroot)src/safety_check.c \ + $(srcroot)src/stats.c \ + $(srcroot)src/sc.c \ + $(srcroot)src/sz.c \ + $(srcroot)src/tcache.c \ + $(srcroot)src/test_hooks.c \ + $(srcroot)src/ticker.c \ + $(srcroot)src/tsd.c \ + $(srcroot)src/witness.c +ifeq ($(enable_zone_allocator), 1) +C_SRCS += $(srcroot)src/zone.c +endif +ifeq ($(IMPORTLIB),$(SO)) +STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) +endif +ifdef PIC_CFLAGS +STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) +else +STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) +endif +DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) +ifneq ($(SOREV),$(SO)) +DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) +endif +ifeq (1, $(link_whole_archive)) +LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive +else +LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) +endif +PC := $(objroot)jemalloc.pc +MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 +DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml +DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) +DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) +DOCS := $(DOCS_HTML) $(DOCS_MAN3) +C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ + $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ + $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ + $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ + $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c +ifeq (1, $(link_whole_archive)) +C_UTIL_INTEGRATION_SRCS := +C_UTIL_CPP_SRCS := +else +C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c +C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c +endif +TESTS_UNIT := \ + $(srcroot)test/unit/a0.c \ + $(srcroot)test/unit/arena_reset.c \ + $(srcroot)test/unit/atomic.c \ + $(srcroot)test/unit/background_thread.c \ + $(srcroot)test/unit/background_thread_enable.c \ + $(srcroot)test/unit/base.c \ + $(srcroot)test/unit/bitmap.c \ + $(srcroot)test/unit/bit_util.c \ + $(srcroot)test/unit/binshard.c \ + $(srcroot)test/unit/ckh.c \ + $(srcroot)test/unit/decay.c \ + $(srcroot)test/unit/div.c \ + $(srcroot)test/unit/emitter.c \ + $(srcroot)test/unit/extent_quantize.c \ + $(srcroot)test/unit/extent_util.c \ + $(srcroot)test/unit/fork.c \ + $(srcroot)test/unit/hash.c \ + $(srcroot)test/unit/hook.c \ + $(srcroot)test/unit/huge.c \ + $(srcroot)test/unit/junk.c \ + $(srcroot)test/unit/junk_alloc.c \ + $(srcroot)test/unit/junk_free.c \ + $(srcroot)test/unit/log.c \ + $(srcroot)test/unit/mallctl.c \ + $(srcroot)test/unit/malloc_io.c \ + $(srcroot)test/unit/math.c \ + $(srcroot)test/unit/mq.c \ + $(srcroot)test/unit/mtx.c \ + $(srcroot)test/unit/pack.c \ + $(srcroot)test/unit/pages.c \ + $(srcroot)test/unit/ph.c \ + 
$(srcroot)test/unit/prng.c \ + $(srcroot)test/unit/prof_accum.c \ + $(srcroot)test/unit/prof_active.c \ + $(srcroot)test/unit/prof_gdump.c \ + $(srcroot)test/unit/prof_idump.c \ + $(srcroot)test/unit/prof_log.c \ + $(srcroot)test/unit/prof_reset.c \ + $(srcroot)test/unit/prof_tctx.c \ + $(srcroot)test/unit/prof_thread_name.c \ + $(srcroot)test/unit/ql.c \ + $(srcroot)test/unit/qr.c \ + $(srcroot)test/unit/rb.c \ + $(srcroot)test/unit/retained.c \ + $(srcroot)test/unit/rtree.c \ + $(srcroot)test/unit/safety_check.c \ + $(srcroot)test/unit/seq.c \ + $(srcroot)test/unit/SFMT.c \ + $(srcroot)test/unit/sc.c \ + $(srcroot)test/unit/size_classes.c \ + $(srcroot)test/unit/slab.c \ + $(srcroot)test/unit/smoothstep.c \ + $(srcroot)test/unit/spin.c \ + $(srcroot)test/unit/stats.c \ + $(srcroot)test/unit/stats_print.c \ + $(srcroot)test/unit/test_hooks.c \ + $(srcroot)test/unit/ticker.c \ + $(srcroot)test/unit/nstime.c \ + $(srcroot)test/unit/tsd.c \ + $(srcroot)test/unit/witness.c \ + $(srcroot)test/unit/zero.c +ifeq (@enable_prof@, 1) +TESTS_UNIT += \ + $(srcroot)test/unit/arena_reset_prof.c +endif +TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ + $(srcroot)test/integration/allocated.c \ + $(srcroot)test/integration/extent.c \ + $(srcroot)test/integration/malloc.c \ + $(srcroot)test/integration/mallocx.c \ + $(srcroot)test/integration/MALLOCX_ARENA.c \ + $(srcroot)test/integration/overflow.c \ + $(srcroot)test/integration/posix_memalign.c \ + $(srcroot)test/integration/rallocx.c \ + $(srcroot)test/integration/sdallocx.c \ + $(srcroot)test/integration/slab_sizes.c \ + $(srcroot)test/integration/thread_arena.c \ + $(srcroot)test/integration/thread_tcache_enabled.c \ + $(srcroot)test/integration/xallocx.c +ifeq (@enable_experimental_smallocx@, 1) +TESTS_INTEGRATION += \ + $(srcroot)test/integration/smallocx.c +endif +ifeq (@enable_cxx@, 1) +CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp +TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp +else +CPP_SRCS := +TESTS_INTEGRATION_CPP := +endif +TESTS_STRESS := $(srcroot)test/stress/microbench.c \ + $(srcroot)test/stress/hookbench.c + + +TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS) + +PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h +PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h) +C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O)) +C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym) +C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) +CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O)) +C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) +CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O)) +C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O)) +C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym) +C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) +C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) +C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) +C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) +C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O)) +C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS) + +TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_INTEGRATION_OBJS := 
$(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O)) +TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) +TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS) + +.PHONY: all dist build_doc_html build_doc_man build_doc +.PHONY: install_bin install_include install_lib +.PHONY: install_doc_html install_doc_man install_doc install +.PHONY: tests check clean distclean relclean + +.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS) + +# Default target. +all: build_lib + +dist: build_doc + +$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl +ifneq ($(XSLROOT),) + $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $< +else +ifeq ($(wildcard $(DOCS_HTML)),) + @echo "
<p>Missing xsltproc. Doc not built.</p>
" > $@ +endif + @echo "Missing xsltproc. "$@" not (re)built." +endif + +$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl +ifneq ($(XSLROOT),) + $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $< +else +ifeq ($(wildcard $(DOCS_MAN3)),) + @echo "Missing xsltproc. Doc not built." > $@ +endif + @echo "Missing xsltproc. "$@" not (re)built." +endif + +build_doc_html: $(DOCS_HTML) +build_doc_man: $(DOCS_MAN3) +build_doc: $(DOCS) + +# +# Include generated dependency files. +# +ifdef CC_MM +-include $(C_SYM_OBJS:%.$(O)=%.d) +-include $(C_OBJS:%.$(O)=%.d) +-include $(CPP_OBJS:%.$(O)=%.d) +-include $(C_PIC_OBJS:%.$(O)=%.d) +-include $(CPP_PIC_OBJS:%.$(O)=%.d) +-include $(C_JET_SYM_OBJS:%.$(O)=%.d) +-include $(C_JET_OBJS:%.$(O)=%.d) +-include $(C_TESTLIB_OBJS:%.$(O)=%.d) +-include $(TESTS_OBJS:%.$(O)=%.d) +-include $(TESTS_CPP_OBJS:%.$(O)=%.d) +endif + +$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c +$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE +$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O) +$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c +$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp +$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c +$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) +$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp +$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS) +$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c +$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE +$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O) +$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c +$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET +$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST +$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST +$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c +$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c +$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB +$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include +$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST +$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST +$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST +$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST +$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c +$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp +$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include +$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include +ifneq ($(IMPORTLIB),$(SO)) +$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT +endif + +# Dependencies. 
+ifndef CC_MM +HEADER_DIRS = $(srcroot)include/jemalloc/internal \ + $(objroot)include/jemalloc $(objroot)include/jemalloc/internal +HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))) +$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS) +$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h +endif + +$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h +$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h + +$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): + @mkdir -p $(@D) + $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< +ifdef CC_MM + @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< +endif + +$(C_SYMS): %.sym: + @mkdir -p $(@D) + $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@ + +$(C_JET_SYMS): %.sym: + @mkdir -p $(@D) + $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@ + +$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS) + $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ + +$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS) + $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ + +%.h: %.gen.h + @if ! `cmp -s $< $@` ; then echo "cp $< $<"; cp $< $@ ; fi + +$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O): + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $< +ifdef CC_MM + @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< +endif + +ifneq ($(SOREV),$(SO)) +%.$(SO) : %.$(SOREV) + @mkdir -p $(@D) + ln -sf $( $(srcroot)config.stamp.in + +$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure + ./$(objroot)config.status + @touch $@ + +# There must be some action in order for make to re-read Makefile when it is +# out of date. +$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp + @true +endif diff --git a/deps/jemalloc/README b/deps/jemalloc/README new file mode 100644 index 0000000..3a6e0d2 --- /dev/null +++ b/deps/jemalloc/README @@ -0,0 +1,20 @@ +jemalloc is a general purpose malloc(3) implementation that emphasizes +fragmentation avoidance and scalable concurrency support. jemalloc first came +into use as the FreeBSD libc allocator in 2005, and since then it has found its +way into numerous applications that rely on its predictable behavior. In 2010 +jemalloc development efforts broadened to include developer support features +such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc +releases continue to be integrated back into FreeBSD, and therefore versatility +remains critical. Ongoing development efforts trend toward making jemalloc +among the best allocators for a broad range of demanding applications, and +eliminating/mitigating weaknesses that have practical repercussions for real +world applications. + +The COPYING file contains copyright and licensing information. + +The INSTALL file contains information on how to configure, build, and install +jemalloc. 
+ +The ChangeLog file contains a brief summary of changes for each release. + +URL: http://jemalloc.net/ diff --git a/deps/jemalloc/TUNING.md b/deps/jemalloc/TUNING.md new file mode 100644 index 0000000..34fca05 --- /dev/null +++ b/deps/jemalloc/TUNING.md @@ -0,0 +1,129 @@ +This document summarizes the common approaches for performance fine tuning with +jemalloc (as of 5.1.0). The default configuration of jemalloc tends to work +reasonably well in practice, and most applications should not have to tune any +options. However, in order to cover a wide range of applications and avoid +pathological cases, the default setting is sometimes kept conservative and +suboptimal, even for many common workloads. When jemalloc is properly tuned for +a specific application / workload, it is common to improve system level metrics +by a few percent, or make favorable trade-offs. + + +## Notable runtime options for performance tuning + +Runtime options can be set via +[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning). + +* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread) + + Enabling jemalloc background threads generally improves the tail latency for + application threads, since unused memory purging is shifted to the dedicated + background threads. In addition, unintended purging delay caused by + application inactivity is avoided with background threads. + + Suggested: `background_thread:true` when jemalloc managed threads can be + allowed. + +* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp) + + Allowing jemalloc to utilize transparent huge pages for its internal + metadata usually reduces TLB misses significantly, especially for programs + with large memory footprint and frequent allocation / deallocation + activities. Metadata memory usage may increase due to the use of huge + pages. + + Suggested for allocation intensive programs: `metadata_thp:auto` or + `metadata_thp:always`, which is expected to improve CPU utilization at a + small memory cost. + +* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and + [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms) + + Decay time determines how fast jemalloc returns unused pages back to the + operating system, and therefore provides a fairly straightforward trade-off + between CPU and memory usage. Shorter decay time purges unused pages faster + to reduces memory usage (usually at the cost of more CPU cycles spent on + purging), and vice versa. + + Suggested: tune the values based on the desired trade-offs. + +* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas) + + By default jemalloc uses multiple arenas to reduce internal lock contention. + However high arena count may also increase overall memory fragmentation, + since arenas manage memory independently. When high degree of parallelism + is not expected at the allocator level, lower number of arenas often + improves memory usage. + + Suggested: if low parallelism is expected, try lower arena count while + monitoring CPU and memory usage. + +* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena) + + Enable dynamic thread to arena association based on running CPU. This has + the potential to improve locality, e.g. when thread to CPU affinity is + present. + + Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if + thread migration between processors is expected to be infrequent. 
+ +Examples: + +* High resource consumption application, prioritizing CPU utilization: + + `background_thread:true,metadata_thp:auto` combined with relaxed decay time + (increased `dirty_decay_ms` and / or `muzzy_decay_ms`, + e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`). + +* High resource consumption application, prioritizing memory usage: + + `background_thread:true` combined with shorter decay time (decreased + `dirty_decay_ms` and / or `muzzy_decay_ms`, + e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count + (e.g. number of CPUs). + +* Low resource consumption application: + + `narenas:1,lg_tcache_max:13` combined with shorter decay time (decreased + `dirty_decay_ms` and / or `muzzy_decay_ms`,e.g. + `dirty_decay_ms:1000,muzzy_decay_ms:0`). + +* Extremely conservative -- minimize memory usage at all costs, only suitable when +allocation activity is very rare: + + `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0` + +Note that it is recommended to combine the options with `abort_conf:true` which +aborts immediately on illegal options. + +## Beyond runtime options + +In addition to the runtime options, there are a number of programmatic ways to +improve application performance with jemalloc. + +* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create) + + Manually created arenas can help performance in various ways, e.g. by + managing locality and contention for specific usages. For example, + applications can explicitly allocate frequently accessed objects from a + dedicated arena with + [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve + locality. In addition, explicit arenas often benefit from individually + tuned options, e.g. relaxed [decay + time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if + frequent reuse is expected. + +* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks) + + Extent hooks allow customization for managing underlying memory. One use + case for performance purpose is to utilize huge pages -- for example, + [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp) + uses explicit arenas with customized extent hooks to manage 1GB huge pages + for frequently accessed data, which reduces TLB misses significantly. + +* [Explicit thread-to-arena + binding](http://jemalloc.net/jemalloc.3.html#thread.arena) + + It is common for some threads in an application to have different memory + access / allocation patterns. Threads with heavy workloads often benefit + from explicit binding, e.g. binding very active threads to dedicated arenas + may reduce contention at the allocator level. diff --git a/deps/jemalloc/VERSION b/deps/jemalloc/VERSION new file mode 100644 index 0000000..05500db --- /dev/null +++ b/deps/jemalloc/VERSION @@ -0,0 +1 @@ +5.2.1-0-g0 diff --git a/deps/jemalloc/autogen.sh b/deps/jemalloc/autogen.sh new file mode 100755 index 0000000..75f32da --- /dev/null +++ b/deps/jemalloc/autogen.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +for i in autoconf; do + echo "$i" + $i + if [ $? -ne 0 ]; then + echo "Error $? in $i" + exit 1 + fi +done + +echo "./configure --enable-autogen $@" +./configure --enable-autogen $@ +if [ $? -ne 0 ]; then + echo "Error $? in ./configure" + exit 1 +fi diff --git a/deps/jemalloc/bin/jemalloc-config.in b/deps/jemalloc/bin/jemalloc-config.in new file mode 100644 index 0000000..80eca2e --- /dev/null +++ b/deps/jemalloc/bin/jemalloc-config.in @@ -0,0 +1,83 @@ +#!/bin/sh + +usage() { + cat < +Options: + --help | -h : Print usage. 
+ --version : Print jemalloc version. + --revision : Print shared library revision number. + --config : Print configure options used to build jemalloc. + --prefix : Print installation directory prefix. + --bindir : Print binary installation directory. + --datadir : Print data installation directory. + --includedir : Print include installation directory. + --libdir : Print library installation directory. + --mandir : Print manual page installation directory. + --cc : Print compiler used to build jemalloc. + --cflags : Print compiler flags used to build jemalloc. + --cppflags : Print preprocessor flags used to build jemalloc. + --cxxflags : Print C++ compiler flags used to build jemalloc. + --ldflags : Print library flags used to build jemalloc. + --libs : Print libraries jemalloc was linked against. +EOF +} + +prefix="@prefix@" +exec_prefix="@exec_prefix@" + +case "$1" in +--help | -h) + usage + exit 0 + ;; +--version) + echo "@jemalloc_version@" + ;; +--revision) + echo "@rev@" + ;; +--config) + echo "@CONFIG@" + ;; +--prefix) + echo "@PREFIX@" + ;; +--bindir) + echo "@BINDIR@" + ;; +--datadir) + echo "@DATADIR@" + ;; +--includedir) + echo "@INCLUDEDIR@" + ;; +--libdir) + echo "@LIBDIR@" + ;; +--mandir) + echo "@MANDIR@" + ;; +--cc) + echo "@CC@" + ;; +--cflags) + echo "@CFLAGS@" + ;; +--cppflags) + echo "@CPPFLAGS@" + ;; +--cxxflags) + echo "@CXXFLAGS@" + ;; +--ldflags) + echo "@LDFLAGS@ @EXTRA_LDFLAGS@" + ;; +--libs) + echo "@LIBS@" + ;; +*) + usage + exit 1 +esac diff --git a/deps/jemalloc/bin/jemalloc.sh.in b/deps/jemalloc/bin/jemalloc.sh.in new file mode 100644 index 0000000..cdf3673 --- /dev/null +++ b/deps/jemalloc/bin/jemalloc.sh.in @@ -0,0 +1,9 @@ +#!/bin/sh + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ + +@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@ +export @LD_PRELOAD_VAR@ +exec "$@" diff --git a/deps/jemalloc/bin/jeprof.in b/deps/jemalloc/bin/jeprof.in new file mode 100644 index 0000000..3ed408c --- /dev/null +++ b/deps/jemalloc/bin/jeprof.in @@ -0,0 +1,5625 @@ +#! /usr/bin/env perl + +# Copyright (c) 1998-2007, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# --- +# Program for printing the profile generated by common/profiler.cc, +# or by the heap profiler (common/debugallocation.cc) +# +# The profile contains a sequence of entries of the form: +# +# This program parses the profile, and generates user-readable +# output. +# +# Examples: +# +# % tools/jeprof "program" "profile" +# Enters "interactive" mode +# +# % tools/jeprof --text "program" "profile" +# Generates one line per procedure +# +# % tools/jeprof --gv "program" "profile" +# Generates annotated call-graph and displays via "gv" +# +# % tools/jeprof --gv --focus=Mutex "program" "profile" +# Restrict to code paths that involve an entry that matches "Mutex" +# +# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile" +# Restrict to code paths that involve an entry that matches "Mutex" +# and does not match "string" +# +# % tools/jeprof --list=IBF_CheckDocid "program" "profile" +# Generates disassembly listing of all routines with at least one +# sample that match the --list= pattern. The listing is +# annotated with the flat and cumulative sample counts at each line. +# +# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile" +# Generates disassembly listing of all routines with at least one +# sample that match the --disasm= pattern. The listing is +# annotated with the flat and cumulative sample counts at each PC value. +# +# TODO: Use color to indicate files? + +use strict; +use warnings; +use Getopt::Long; +use Cwd; + +my $JEPROF_VERSION = "@jemalloc_version@"; +my $PPROF_VERSION = "2.0"; + +# These are the object tools we use which can come from a +# user-specified location using --tools, from the JEPROF_TOOLS +# environment variable, or from the environment. +my %obj_tool_map = ( + "objdump" => "objdump", + "nm" => "nm", + "addr2line" => "addr2line", + "c++filt" => "c++filt", + ## ConfigureObjTools may add architecture-specific entries: + #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables + #"addr2line_pdb" => "addr2line-pdb", # ditto + #"otool" => "otool", # equivalent of objdump on OS X +); +# NOTE: these are lists, so you can put in commandline flags if you want. 
+my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local +my @GV = ("gv"); +my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread +my @KCACHEGRIND = ("kcachegrind"); +my @PS2PDF = ("ps2pdf"); +# These are used for dynamic profiles +my @URL_FETCHER = ("curl", "-s", "--fail"); + +# These are the web pages that servers need to support for dynamic profiles +my $HEAP_PAGE = "/pprof/heap"; +my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#" +my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param + # ?seconds=#&event=x&period=n +my $GROWTH_PAGE = "/pprof/growth"; +my $CONTENTION_PAGE = "/pprof/contention"; +my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter +my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?"; +my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param + # "?seconds=#", + # "?tags_regexp=#" and + # "?type=#". +my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST +my $PROGRAM_NAME_PAGE = "/pprof/cmdline"; + +# These are the web pages that can be named on the command line. +# All the alternatives must begin with /. +my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" . + "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" . + "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)"; + +# default binary name +my $UNKNOWN_BINARY = "(unknown)"; + +# There is a pervasive dependency on the length (in hex characters, +# i.e., nibbles) of an address, distinguishing between 32-bit and +# 64-bit profiles. To err on the safe size, default to 64-bit here: +my $address_length = 16; + +my $dev_null = "/dev/null"; +if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for + $dev_null = "nul"; +} + +# A list of paths to search for shared object files +my @prefix_list = (); + +# Special routine name that should not have any symbols. +# Used as separator to parse "addr2line -i" output. +my $sep_symbol = '_fini'; +my $sep_address = undef; + +##### Argument parsing ##### + +sub usage_string { + return < + is a space separated list of profile names. +jeprof [options] + is a list of profile files where each file contains + the necessary symbol mappings as well as profile data (likely generated + with --raw). +jeprof [options] + is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE + + Each name can be: + /path/to/profile - a path to a profile file + host:port[/] - a location of a service to get profile from + + The / can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile, + $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall, + $CENSUSPROFILE_PAGE, or /pprof/filteredprofile. + For instance: + jeprof http://myserver.com:80$HEAP_PAGE + If / is omitted, the service defaults to $PROFILE_PAGE (cpu profiling). +jeprof --symbols + Maps addresses to symbol names. In this mode, stdin should be a + list of library mappings, in the same format as is found in the heap- + and cpu-profile files (this loosely matches that of /proc/self/maps + on linux), followed by a list of hex addresses to map, one per line. 
+ + For more help with querying remote servers, including how to add the + necessary server-side support code, see this filename (or one like it): + + /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html + +Options: + --cum Sort by cumulative data + --base= Subtract from before display + --interactive Run in interactive mode (interactive "help" gives help) [default] + --seconds= Length of time for dynamic profiles [default=30 secs] + --add_lib= Read additional symbols and line info from the given library + --lib_prefix= Comma separated list of library path prefixes + +Reporting Granularity: + --addresses Report at address level + --lines Report at source line level + --functions Report at function level [default] + --files Report at source file level + +Output type: + --text Generate text report + --callgrind Generate callgrind format to stdout + --gv Generate Postscript and display + --evince Generate PDF and display + --web Generate SVG and display + --list= Generate source listing of matching routines + --disasm= Generate disassembly of matching routines + --symbols Print demangled symbol names found at given addresses + --dot Generate DOT file to stdout + --ps Generate Postcript to stdout + --pdf Generate PDF to stdout + --svg Generate SVG to stdout + --gif Generate GIF to stdout + --raw Generate symbolized jeprof data (useful with remote fetch) + +Heap-Profile Options: + --inuse_space Display in-use (mega)bytes [default] + --inuse_objects Display in-use objects + --alloc_space Display allocated (mega)bytes + --alloc_objects Display allocated objects + --show_bytes Display space in bytes + --drop_negative Ignore negative differences + +Contention-profile options: + --total_delay Display total delay at each region [default] + --contentions Display number of delays at each region + --mean_delay Display mean delay at each region + +Call-graph Options: + --nodecount= Show at most so many nodes [default=80] + --nodefraction= Hide nodes below *total [default=.005] + --edgefraction= Hide edges below *total [default=.001] + --maxdegree= Max incoming/outgoing edges per node [default=8] + --focus= Focus on backtraces with nodes matching + --thread= Show profile for thread + --ignore= Ignore backtraces with nodes matching + --scale= Set GV scaling [default=0] + --heapcheck Make nodes with non-0 object counts + (i.e. direct leak generators) more visible + --retain= Retain only nodes that match + --exclude= Exclude all nodes that match + +Miscellaneous: + --tools=[,...] \$PATH for object tool pathnames + --test Run unit tests + --help This message + --version Version information + +Environment Variables: + JEPROF_TMPDIR Profiles directory. 
Defaults to \$HOME/jeprof + JEPROF_TOOLS Prefix for object tools pathnames + +Examples: + +jeprof /bin/ls ls.prof + Enters "interactive" mode +jeprof --text /bin/ls ls.prof + Outputs one line per procedure +jeprof --web /bin/ls ls.prof + Displays annotated call-graph in web browser +jeprof --gv /bin/ls ls.prof + Displays annotated call-graph via 'gv' +jeprof --gv --focus=Mutex /bin/ls ls.prof + Restricts to code paths including a .*Mutex.* entry +jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof + Code paths including Mutex but not string +jeprof --list=getdir /bin/ls ls.prof + (Per-line) annotated source listing for getdir() +jeprof --disasm=getdir /bin/ls ls.prof + (Per-PC) annotated disassembly for getdir() + +jeprof http://localhost:1234/ + Enters "interactive" mode +jeprof --text localhost:1234 + Outputs one line per procedure for localhost:1234 +jeprof --raw localhost:1234 > ./local.raw +jeprof --text ./local.raw + Fetches a remote profile for later analysis and then + analyzes it in text mode. +EOF +} + +sub version_string { + return < \$main::opt_help, + "version!" => \$main::opt_version, + "cum!" => \$main::opt_cum, + "base=s" => \$main::opt_base, + "seconds=i" => \$main::opt_seconds, + "add_lib=s" => \$main::opt_lib, + "lib_prefix=s" => \$main::opt_lib_prefix, + "functions!" => \$main::opt_functions, + "lines!" => \$main::opt_lines, + "addresses!" => \$main::opt_addresses, + "files!" => \$main::opt_files, + "text!" => \$main::opt_text, + "callgrind!" => \$main::opt_callgrind, + "list=s" => \$main::opt_list, + "disasm=s" => \$main::opt_disasm, + "symbols!" => \$main::opt_symbols, + "gv!" => \$main::opt_gv, + "evince!" => \$main::opt_evince, + "web!" => \$main::opt_web, + "dot!" => \$main::opt_dot, + "ps!" => \$main::opt_ps, + "pdf!" => \$main::opt_pdf, + "svg!" => \$main::opt_svg, + "gif!" => \$main::opt_gif, + "raw!" => \$main::opt_raw, + "interactive!" => \$main::opt_interactive, + "nodecount=i" => \$main::opt_nodecount, + "nodefraction=f" => \$main::opt_nodefraction, + "edgefraction=f" => \$main::opt_edgefraction, + "maxdegree=i" => \$main::opt_maxdegree, + "focus=s" => \$main::opt_focus, + "thread=s" => \$main::opt_thread, + "ignore=s" => \$main::opt_ignore, + "scale=i" => \$main::opt_scale, + "heapcheck" => \$main::opt_heapcheck, + "retain=s" => \$main::opt_retain, + "exclude=s" => \$main::opt_exclude, + "inuse_space!" => \$main::opt_inuse_space, + "inuse_objects!" => \$main::opt_inuse_objects, + "alloc_space!" => \$main::opt_alloc_space, + "alloc_objects!" => \$main::opt_alloc_objects, + "show_bytes!" => \$main::opt_show_bytes, + "drop_negative!" => \$main::opt_drop_negative, + "total_delay!" => \$main::opt_total_delay, + "contentions!" => \$main::opt_contentions, + "mean_delay!" => \$main::opt_mean_delay, + "tools=s" => \$main::opt_tools, + "test!" => \$main::opt_test, + "debug!" 
=> \$main::opt_debug, + # Undocumented flags used only by unittests: + "test_stride=i" => \$main::opt_test_stride, + ) || usage("Invalid option(s)"); + + # Deal with the standard --help and --version + if ($main::opt_help) { + print usage_string(); + exit(0); + } + + if ($main::opt_version) { + print version_string(); + exit(0); + } + + # Disassembly/listing/symbols mode requires address-level info + if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) { + $main::opt_functions = 0; + $main::opt_lines = 0; + $main::opt_addresses = 1; + $main::opt_files = 0; + } + + # Check heap-profiling flags + if ($main::opt_inuse_space + + $main::opt_inuse_objects + + $main::opt_alloc_space + + $main::opt_alloc_objects > 1) { + usage("Specify at most on of --inuse/--alloc options"); + } + + # Check output granularities + my $grains = + $main::opt_functions + + $main::opt_lines + + $main::opt_addresses + + $main::opt_files + + 0; + if ($grains > 1) { + usage("Only specify one output granularity option"); + } + if ($grains == 0) { + $main::opt_functions = 1; + } + + # Check output modes + my $modes = + $main::opt_text + + $main::opt_callgrind + + ($main::opt_list eq '' ? 0 : 1) + + ($main::opt_disasm eq '' ? 0 : 1) + + ($main::opt_symbols == 0 ? 0 : 1) + + $main::opt_gv + + $main::opt_evince + + $main::opt_web + + $main::opt_dot + + $main::opt_ps + + $main::opt_pdf + + $main::opt_svg + + $main::opt_gif + + $main::opt_raw + + $main::opt_interactive + + 0; + if ($modes > 1) { + usage("Only specify one output mode"); + } + if ($modes == 0) { + if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode + $main::opt_interactive = 1; + } else { + $main::opt_text = 1; + } + } + + if ($main::opt_test) { + RunUnitTests(); + # Should not return + exit(1); + } + + # Binary name and profile arguments list + $main::prog = ""; + @main::pfile_args = (); + + # Remote profiling without a binary (using $SYMBOL_PAGE instead) + if (@ARGV > 0) { + if (IsProfileURL($ARGV[0])) { + $main::use_symbol_page = 1; + } elsif (IsSymbolizedProfileFile($ARGV[0])) { + $main::use_symbolized_profile = 1; + $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file + } + } + + if ($main::use_symbol_page || $main::use_symbolized_profile) { + # We don't need a binary! + my %disabled = ('--lines' => $main::opt_lines, + '--disasm' => $main::opt_disasm); + for my $option (keys %disabled) { + usage("$option cannot be used without a binary") if $disabled{$option}; + } + # Set $main::prog later... + scalar(@ARGV) || usage("Did not specify profile file"); + } elsif ($main::opt_symbols) { + # --symbols needs a binary-name (to run nm on, etc) but not profiles + $main::prog = shift(@ARGV) || usage("Did not specify program"); + } else { + $main::prog = shift(@ARGV) || usage("Did not specify program"); + scalar(@ARGV) || usage("Did not specify profile file"); + } + + # Parse profile file/location arguments + foreach my $farg (@ARGV) { + if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) { + my $machine = $1; + my $num_machines = $2; + my $path = $3; + for (my $i = 0; $i < $num_machines; $i++) { + unshift(@main::pfile_args, "$i.$machine$path"); + } + } else { + unshift(@main::pfile_args, $farg); + } + } + + if ($main::use_symbol_page) { + unless (IsProfileURL($main::pfile_args[0])) { + error("The first profile should be a remote form to use $SYMBOL_PAGE\n"); + } + CheckSymbolPage(); + $main::prog = FetchProgramName(); + } elsif (!$main::use_symbolized_profile) { # may not need objtools! 
+ ConfigureObjTools($main::prog) + } + + # Break the opt_lib_prefix into the prefix_list array + @prefix_list = split (',', $main::opt_lib_prefix); + + # Remove trailing / from the prefixes, in the list to prevent + # searching things like /my/path//lib/mylib.so + foreach (@prefix_list) { + s|/+$||; + } +} + +sub FilterAndPrint { + my ($profile, $symbols, $libs, $thread) = @_; + + # Get total data in profile + my $total = TotalProfile($profile); + + # Remove uniniteresting stack items + $profile = RemoveUninterestingFrames($symbols, $profile); + + # Focus? + if ($main::opt_focus ne '') { + $profile = FocusProfile($symbols, $profile, $main::opt_focus); + } + + # Ignore? + if ($main::opt_ignore ne '') { + $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore); + } + + my $calls = ExtractCalls($symbols, $profile); + + # Reduce profiles to required output granularity, and also clean + # each stack trace so a given entry exists at most once. + my $reduced = ReduceProfile($symbols, $profile); + + # Get derived profiles + my $flat = FlatProfile($reduced); + my $cumulative = CumulativeProfile($reduced); + + # Print + if (!$main::opt_interactive) { + if ($main::opt_disasm) { + PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm); + } elsif ($main::opt_list) { + PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0); + } elsif ($main::opt_text) { + # Make sure the output is empty when have nothing to report + # (only matters when --heapcheck is given but we must be + # compatible with old branches that did not pass --heapcheck always): + if ($total != 0) { + printf("Total%s: %s %s\n", + (defined($thread) ? " (t$thread)" : ""), + Unparse($total), Units()); + } + PrintText($symbols, $flat, $cumulative, -1); + } elsif ($main::opt_raw) { + PrintSymbolizedProfile($symbols, $profile, $main::prog); + } elsif ($main::opt_callgrind) { + PrintCallgrind($calls); + } else { + if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { + if ($main::opt_gv) { + RunGV(TempName($main::next_tmpfile, "ps"), ""); + } elsif ($main::opt_evince) { + RunEvince(TempName($main::next_tmpfile, "pdf"), ""); + } elsif ($main::opt_web) { + my $tmp = TempName($main::next_tmpfile, "svg"); + RunWeb($tmp); + # The command we run might hand the file name off + # to an already running browser instance and then exit. + # Normally, we'd remove $tmp on exit (right now), + # but fork a child to remove $tmp a little later, so that the + # browser has time to load it first. + delete $main::tempnames{$tmp}; + if (fork() == 0) { + sleep 5; + unlink($tmp); + exit(0); + } + } + } else { + cleanup(); + exit(1); + } + } + } else { + InteractiveMode($profile, $symbols, $libs, $total); + } +} + +sub Main() { + Init(); + $main::collected_profile = undef; + @main::profile_files = (); + $main::op_time = time(); + + # Printing symbols is special and requires a lot less info that most. + if ($main::opt_symbols) { + PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin + return; + } + + # Fetch all profile data + FetchDynamicProfiles(); + + # this will hold symbols that we read from the profile files + my $symbol_map = {}; + + # Read one profile, pick the last item on the list + my $data = ReadProfile($main::prog, pop(@main::profile_files)); + my $profile = $data->{profile}; + my $pcs = $data->{pcs}; + my $libs = $data->{libs}; # Info about main program and shared libraries + $symbol_map = MergeSymbols($symbol_map, $data->{symbols}); + + # Add additional profiles, if available. 
+ if (scalar(@main::profile_files) > 0) { + foreach my $pname (@main::profile_files) { + my $data2 = ReadProfile($main::prog, $pname); + $profile = AddProfile($profile, $data2->{profile}); + $pcs = AddPcs($pcs, $data2->{pcs}); + $symbol_map = MergeSymbols($symbol_map, $data2->{symbols}); + } + } + + # Subtract base from profile, if specified + if ($main::opt_base ne '') { + my $base = ReadProfile($main::prog, $main::opt_base); + $profile = SubtractProfile($profile, $base->{profile}); + $pcs = AddPcs($pcs, $base->{pcs}); + $symbol_map = MergeSymbols($symbol_map, $base->{symbols}); + } + + # Collect symbols + my $symbols; + if ($main::use_symbolized_profile) { + $symbols = FetchSymbols($pcs, $symbol_map); + } elsif ($main::use_symbol_page) { + $symbols = FetchSymbols($pcs); + } else { + # TODO(csilvers): $libs uses the /proc/self/maps data from profile1, + # which may differ from the data from subsequent profiles, especially + # if they were run on different machines. Use appropriate libs for + # each pc somehow. + $symbols = ExtractSymbols($libs, $pcs); + } + + if (!defined($main::opt_thread)) { + FilterAndPrint($profile, $symbols, $libs); + } + if (defined($data->{threads})) { + foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) { + if (defined($main::opt_thread) && + ($main::opt_thread eq '*' || $main::opt_thread == $thread)) { + my $thread_profile = $data->{threads}{$thread}; + FilterAndPrint($thread_profile, $symbols, $libs, $thread); + } + } + } + + cleanup(); + exit(0); +} + +##### Entry Point ##### + +Main(); + +# Temporary code to detect if we're running on a Goobuntu system. +# These systems don't have the right stuff installed for the special +# Readline libraries to work, so as a temporary workaround, we default +# to using the normal stdio code, rather than the fancier readline-based +# code +sub ReadlineMightFail { + if (-e '/lib/libtermcap.so.2') { + return 0; # libtermcap exists, so readline should be okay + } else { + return 1; + } +} + +sub RunGV { + my $fname = shift; + my $bg = shift; # "" or " &" if we should run in background + if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) { + # Options using double dash are supported by this gv version. + # Also, turn on noantialias to better handle bug in gv for + # postscript files with large dimensions. + # TODO: Maybe we should not pass the --noantialias flag + # if the gv version is known to work properly without the flag. + system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname) + . $bg); + } else { + # Old gv version - only supports options that use single dash. + print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n"; + system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg); + } +} + +sub RunEvince { + my $fname = shift; + my $bg = shift; # "" or " &" if we should run in background + system(ShellEscape(@EVINCE, $fname) . $bg); +} + +sub RunWeb { + my $fname = shift; + print STDERR "Loading web page file:///$fname\n"; + + if (`uname` =~ /Darwin/) { + # OS X: open will use standard preference for SVG files. + system("/usr/bin/open", $fname); + return; + } + + # Some kind of Unix; try generic symlinks, then specific browsers. + # (Stop once we find one.) + # Works best if the browser is already running. 
+ my @alt = ( + "/etc/alternatives/gnome-www-browser", + "/etc/alternatives/x-www-browser", + "google-chrome", + "firefox", + ); + foreach my $b (@alt) { + if (system($b, $fname) == 0) { + return; + } + } + + print STDERR "Could not load web browser.\n"; +} + +sub RunKcachegrind { + my $fname = shift; + my $bg = shift; # "" or " &" if we should run in background + print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n"; + system(ShellEscape(@KCACHEGRIND, $fname) . $bg); +} + + +##### Interactive helper routines ##### + +sub InteractiveMode { + $| = 1; # Make output unbuffered for interactive mode + my ($orig_profile, $symbols, $libs, $total) = @_; + + print STDERR "Welcome to jeprof! For help, type 'help'.\n"; + + # Use ReadLine if it's installed and input comes from a console. + if ( -t STDIN && + !ReadlineMightFail() && + defined(eval {require Term::ReadLine}) ) { + my $term = new Term::ReadLine 'jeprof'; + while ( defined ($_ = $term->readline('(jeprof) '))) { + $term->addhistory($_) if /\S/; + if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { + last; # exit when we get an interactive command to quit + } + } + } else { # don't have readline + while (1) { + print STDERR "(jeprof) "; + $_ = ; + last if ! defined $_ ; + s/\r//g; # turn windows-looking lines into unix-looking lines + + # Save some flags that might be reset by InteractiveCommand() + my $save_opt_lines = $main::opt_lines; + + if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) { + last; # exit when we get an interactive command to quit + } + + # Restore flags + $main::opt_lines = $save_opt_lines; + } + } +} + +# Takes two args: orig profile, and command to run. +# Returns 1 if we should keep going, or 0 if we were asked to quit +sub InteractiveCommand { + my($orig_profile, $symbols, $libs, $total, $command) = @_; + $_ = $command; # just to make future m//'s easier + if (!defined($_)) { + print STDERR "\n"; + return 0; + } + if (m/^\s*quit/) { + return 0; + } + if (m/^\s*help/) { + InteractiveHelpMessage(); + return 1; + } + # Clear all the mode options -- mode is controlled by "$command" + $main::opt_text = 0; + $main::opt_callgrind = 0; + $main::opt_disasm = 0; + $main::opt_list = 0; + $main::opt_gv = 0; + $main::opt_evince = 0; + $main::opt_cum = 0; + + if (m/^\s*(text|top)(\d*)\s*(.*)/) { + $main::opt_text = 1; + + my $line_limit = ($2 ne "") ? 
int($2) : 10; + + my $routine; + my $ignore; + ($routine, $ignore) = ParseInteractiveArgs($3); + + my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); + my $reduced = ReduceProfile($symbols, $profile); + + # Get derived profiles + my $flat = FlatProfile($reduced); + my $cumulative = CumulativeProfile($reduced); + + PrintText($symbols, $flat, $cumulative, $line_limit); + return 1; + } + if (m/^\s*callgrind\s*([^ \n]*)/) { + $main::opt_callgrind = 1; + + # Get derived profiles + my $calls = ExtractCalls($symbols, $orig_profile); + my $filename = $1; + if ( $1 eq '' ) { + $filename = TempName($main::next_tmpfile, "callgrind"); + } + PrintCallgrind($calls, $filename); + if ( $1 eq '' ) { + RunKcachegrind($filename, " & "); + $main::next_tmpfile++; + } + + return 1; + } + if (m/^\s*(web)?list\s*(.+)/) { + my $html = (defined($1) && ($1 eq "web")); + $main::opt_list = 1; + + my $routine; + my $ignore; + ($routine, $ignore) = ParseInteractiveArgs($2); + + my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); + my $reduced = ReduceProfile($symbols, $profile); + + # Get derived profiles + my $flat = FlatProfile($reduced); + my $cumulative = CumulativeProfile($reduced); + + PrintListing($total, $libs, $flat, $cumulative, $routine, $html); + return 1; + } + if (m/^\s*disasm\s*(.+)/) { + $main::opt_disasm = 1; + + my $routine; + my $ignore; + ($routine, $ignore) = ParseInteractiveArgs($1); + + # Process current profile to account for various settings + my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore); + my $reduced = ReduceProfile($symbols, $profile); + + # Get derived profiles + my $flat = FlatProfile($reduced); + my $cumulative = CumulativeProfile($reduced); + + PrintDisassembly($libs, $flat, $cumulative, $routine); + return 1; + } + if (m/^\s*(gv|web|evince)\s*(.*)/) { + $main::opt_gv = 0; + $main::opt_evince = 0; + $main::opt_web = 0; + if ($1 eq "gv") { + $main::opt_gv = 1; + } elsif ($1 eq "evince") { + $main::opt_evince = 1; + } elsif ($1 eq "web") { + $main::opt_web = 1; + } + + my $focus; + my $ignore; + ($focus, $ignore) = ParseInteractiveArgs($2); + + # Process current profile to account for various settings + my $profile = ProcessProfile($total, $orig_profile, $symbols, + $focus, $ignore); + my $reduced = ReduceProfile($symbols, $profile); + + # Get derived profiles + my $flat = FlatProfile($reduced); + my $cumulative = CumulativeProfile($reduced); + + if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) { + if ($main::opt_gv) { + RunGV(TempName($main::next_tmpfile, "ps"), " &"); + } elsif ($main::opt_evince) { + RunEvince(TempName($main::next_tmpfile, "pdf"), " &"); + } elsif ($main::opt_web) { + RunWeb(TempName($main::next_tmpfile, "svg")); + } + $main::next_tmpfile++; + } + return 1; + } + if (m/^\s*$/) { + return 1; + } + print STDERR "Unknown command: try 'help'.\n"; + return 1; +} + + +sub ProcessProfile { + my $total_count = shift; + my $orig_profile = shift; + my $symbols = shift; + my $focus = shift; + my $ignore = shift; + + # Process current profile to account for various settings + my $profile = $orig_profile; + printf("Total: %s %s\n", Unparse($total_count), Units()); + if ($focus ne '') { + $profile = FocusProfile($symbols, $profile, $focus); + my $focus_count = TotalProfile($profile); + printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n", + $focus, + Unparse($focus_count), Units(), + Unparse($total_count), ($focus_count*100.0) / $total_count); + } + if ($ignore ne '') { + 
$profile = IgnoreProfile($symbols, $profile, $ignore); + my $ignore_count = TotalProfile($profile); + printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n", + $ignore, + Unparse($ignore_count), Units(), + Unparse($total_count), + ($ignore_count*100.0) / $total_count); + } + + return $profile; +} + +sub InteractiveHelpMessage { + print STDERR <{$k}; + my @addrs = split(/\n/, $k); + if ($#addrs >= 0) { + my $depth = $#addrs + 1; + # int(foo / 2**32) is the only reliable way to get rid of bottom + # 32 bits on both 32- and 64-bit systems. + print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32)); + print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32)); + + foreach my $full_addr (@addrs) { + my $addr = $full_addr; + $addr =~ s/0x0*//; # strip off leading 0x, zeroes + if (length($addr) > 16) { + print STDERR "Invalid address in profile: $full_addr\n"; + next; + } + my $low_addr = substr($addr, -8); # get last 8 hex chars + my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars + print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr)); + } + } + } +} + +# Print symbols and profile data +sub PrintSymbolizedProfile { + my $symbols = shift; + my $profile = shift; + my $prog = shift; + + $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $symbol_marker = $&; + + print '--- ', $symbol_marker, "\n"; + if (defined($prog)) { + print 'binary=', $prog, "\n"; + } + while (my ($pc, $name) = each(%{$symbols})) { + my $sep = ' '; + print '0x', $pc; + # We have a list of function names, which include the inlined + # calls. They are separated (and terminated) by --, which is + # illegal in function names. + for (my $j = 2; $j <= $#{$name}; $j += 3) { + print $sep, $name->[$j]; + $sep = '--'; + } + print "\n"; + } + print '---', "\n"; + + my $profile_marker; + if ($main::profile_type eq 'heap') { + $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } elsif ($main::profile_type eq 'growth') { + $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } elsif ($main::profile_type eq 'contention') { + $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } else { # elsif ($main::profile_type eq 'cpu') + $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } + + print '--- ', $profile_marker, "\n"; + if (defined($main::collected_profile)) { + # if used with remote fetch, simply dump the collected profile to output. + open(SRC, "<$main::collected_profile"); + while () { + print $_; + } + close(SRC); + } else { + # --raw/http: For everything to work correctly for non-remote profiles, we + # would need to extend PrintProfileData() to handle all possible profile + # types, re-enable the code that is currently disabled in ReadCPUProfile() + # and FixCallerAddresses(), and remove the remote profile dumping code in + # the block above. + die "--raw/http: jeprof can only dump remote profiles for --raw\n"; + # dump a cpu-format profile to standard out + PrintProfileData($profile); + } +} + +# Print text output +sub PrintText { + my $symbols = shift; + my $flat = shift; + my $cumulative = shift; + my $line_limit = shift; + + my $total = TotalProfile($flat); + + # Which profile to sort by? + my $s = $main::opt_cum ? 
$cumulative : $flat; + + my $running_sum = 0; + my $lines = 0; + foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b } + keys(%{$cumulative})) { + my $f = GetEntry($flat, $k); + my $c = GetEntry($cumulative, $k); + $running_sum += $f; + + my $sym = $k; + if (exists($symbols->{$k})) { + $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1]; + if ($main::opt_addresses) { + $sym = $k . " " . $sym; + } + } + + if ($f != 0 || $c != 0) { + printf("%8s %6s %6s %8s %6s %s\n", + Unparse($f), + Percent($f, $total), + Percent($running_sum, $total), + Unparse($c), + Percent($c, $total), + $sym); + } + $lines++; + last if ($line_limit >= 0 && $lines >= $line_limit); + } +} + +# Callgrind format has a compression for repeated function and file +# names. You show the name the first time, and just use its number +# subsequently. This can cut down the file to about a third or a +# quarter of its uncompressed size. $key and $val are the key/value +# pair that would normally be printed by callgrind; $map is a map from +# value to number. +sub CompressedCGName { + my($key, $val, $map) = @_; + my $idx = $map->{$val}; + # For very short keys, providing an index hurts rather than helps. + if (length($val) <= 3) { + return "$key=$val\n"; + } elsif (defined($idx)) { + return "$key=($idx)\n"; + } else { + # scalar(keys $map) gives the number of items in the map. + $idx = scalar(keys(%{$map})) + 1; + $map->{$val} = $idx; + return "$key=($idx) $val\n"; + } +} + +# Print the call graph in a way that's suiteable for callgrind. +sub PrintCallgrind { + my $calls = shift; + my $filename; + my %filename_to_index_map; + my %fnname_to_index_map; + + if ($main::opt_interactive) { + $filename = shift; + print STDERR "Writing callgrind file to '$filename'.\n" + } else { + $filename = "&STDOUT"; + } + open(CG, ">$filename"); + printf CG ("events: Hits\n\n"); + foreach my $call ( map { $_->[0] } + sort { $a->[1] cmp $b ->[1] || + $a->[2] <=> $b->[2] } + map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; + [$_, $1, $2] } + keys %$calls ) { + my $count = int($calls->{$call}); + $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/; + my ( $caller_file, $caller_line, $caller_function, + $callee_file, $callee_line, $callee_function ) = + ( $1, $2, $3, $5, $6, $7 ); + + # TODO(csilvers): for better compression, collect all the + # caller/callee_files and functions first, before printing + # anything, and only compress those referenced more than once. 
+ printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map); + printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map); + if (defined $6) { + printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map); + printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map); + printf CG ("calls=$count $callee_line\n"); + } + printf CG ("$caller_line $count\n\n"); + } +} + +# Print disassembly for all all routines that match $main::opt_disasm +sub PrintDisassembly { + my $libs = shift; + my $flat = shift; + my $cumulative = shift; + my $disasm_opts = shift; + + my $total = TotalProfile($flat); + + foreach my $lib (@{$libs}) { + my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts); + my $offset = AddressSub($lib->[1], $lib->[3]); + foreach my $routine (sort ByName keys(%{$symbol_table})) { + my $start_addr = $symbol_table->{$routine}->[0]; + my $end_addr = $symbol_table->{$routine}->[1]; + # See if there are any samples in this routine + my $length = hex(AddressSub($end_addr, $start_addr)); + my $addr = AddressAdd($start_addr, $offset); + for (my $i = 0; $i < $length; $i++) { + if (defined($cumulative->{$addr})) { + PrintDisassembledFunction($lib->[0], $offset, + $routine, $flat, $cumulative, + $start_addr, $end_addr, $total); + last; + } + $addr = AddressInc($addr); + } + } + } +} + +# Return reference to array of tuples of the form: +# [start_address, filename, linenumber, instruction, limit_address] +# E.g., +# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"] +sub Disassemble { + my $prog = shift; + my $offset = shift; + my $start_addr = shift; + my $end_addr = shift; + + my $objdump = $obj_tool_map{"objdump"}; + my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn", + "--start-address=0x$start_addr", + "--stop-address=0x$end_addr", $prog); + open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); + my @result = (); + my $filename = ""; + my $linenumber = -1; + my $last = ["", "", "", ""]; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + chop; + if (m|\s*([^:\s]+):(\d+)\s*$|) { + # Location line of the form: + # : + $filename = $1; + $linenumber = $2; + } elsif (m/^ +([0-9a-f]+):\s*(.*)/) { + # Disassembly line -- zero-extend address to full length + my $addr = HexExtend($1); + my $k = AddressAdd($addr, $offset); + $last->[4] = $k; # Store ending address for previous instruction + $last = [$k, $filename, $linenumber, $2, $end_addr]; + push(@result, $last); + } + } + close(OBJDUMP); + return @result; +} + +# The input file should contain lines of the form /proc/maps-like +# output (same format as expected from the profiles) or that looks +# like hex addresses (like "0xDEADBEEF"). We will parse all +# /proc/maps output, and for all the hex addresses, we will output +# "short" symbol names, one per line, in the same order as the input. +sub PrintSymbols { + my $maps_and_symbols_file = shift; + + # ParseLibraries expects pcs to be in a set. Fine by us... 
+ my @pclist = (); # pcs in sorted order + my $pcs = {}; + my $map = ""; + foreach my $line (<$maps_and_symbols_file>) { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + if ($line =~ /\b(0x[0-9a-f]+)\b/i) { + push(@pclist, HexExtend($1)); + $pcs->{$pclist[-1]} = 1; + } else { + $map .= $line; + } + } + + my $libs = ParseLibraries($main::prog, $map, $pcs); + my $symbols = ExtractSymbols($libs, $pcs); + + foreach my $pc (@pclist) { + # ->[0] is the shortname, ->[2] is the full name + print(($symbols->{$pc}->[0] || "??") . "\n"); + } +} + + +# For sorting functions by name +sub ByName { + return ShortFunctionName($a) cmp ShortFunctionName($b); +} + +# Print source-listing for all all routines that match $list_opts +sub PrintListing { + my $total = shift; + my $libs = shift; + my $flat = shift; + my $cumulative = shift; + my $list_opts = shift; + my $html = shift; + + my $output = \*STDOUT; + my $fname = ""; + + if ($html) { + # Arrange to write the output to a temporary file + $fname = TempName($main::next_tmpfile, "html"); + $main::next_tmpfile++; + if (!open(TEMP, ">$fname")) { + print STDERR "$fname: $!\n"; + return; + } + $output = \*TEMP; + print $output HtmlListingHeader(); + printf $output ("
<div class=\"legend\">%s<br>Total: %s %s</div>
\n", + $main::prog, Unparse($total), Units()); + } + + my $listed = 0; + foreach my $lib (@{$libs}) { + my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts); + my $offset = AddressSub($lib->[1], $lib->[3]); + foreach my $routine (sort ByName keys(%{$symbol_table})) { + # Print if there are any samples in this routine + my $start_addr = $symbol_table->{$routine}->[0]; + my $end_addr = $symbol_table->{$routine}->[1]; + my $length = hex(AddressSub($end_addr, $start_addr)); + my $addr = AddressAdd($start_addr, $offset); + for (my $i = 0; $i < $length; $i++) { + if (defined($cumulative->{$addr})) { + $listed += PrintSource( + $lib->[0], $offset, + $routine, $flat, $cumulative, + $start_addr, $end_addr, + $html, + $output); + last; + } + $addr = AddressInc($addr); + } + } + } + + if ($html) { + if ($listed > 0) { + print $output HtmlListingFooter(); + close($output); + RunWeb($fname); + } else { + close($output); + unlink($fname); + } + } +} + +sub HtmlListingHeader { + return <<'EOF'; + + + +Pprof listing + + + + +EOF +} + +sub HtmlListingFooter { + return <<'EOF'; + + +EOF +} + +sub HtmlEscape { + my $text = shift; + $text =~ s/&/&/g; + $text =~ s//>/g; + return $text; +} + +# Returns the indentation of the line, if it has any non-whitespace +# characters. Otherwise, returns -1. +sub Indentation { + my $line = shift; + if (m/^(\s*)\S/) { + return length($1); + } else { + return -1; + } +} + +# If the symbol table contains inlining info, Disassemble() may tag an +# instruction with a location inside an inlined function. But for +# source listings, we prefer to use the location in the function we +# are listing. So use MapToSymbols() to fetch full location +# information for each instruction and then pick out the first +# location from a location list (location list contains callers before +# callees in case of inlining). 
+# +# After this routine has run, each entry in $instructions contains: +# [0] start address +# [1] filename for function we are listing +# [2] line number for function we are listing +# [3] disassembly +# [4] limit address +# [5] most specific filename (may be different from [1] due to inlining) +# [6] most specific line number (may be different from [2] due to inlining) +sub GetTopLevelLineNumbers { + my ($lib, $offset, $instructions) = @_; + my $pcs = []; + for (my $i = 0; $i <= $#{$instructions}; $i++) { + push(@{$pcs}, $instructions->[$i]->[0]); + } + my $symbols = {}; + MapToSymbols($lib, $offset, $pcs, $symbols); + for (my $i = 0; $i <= $#{$instructions}; $i++) { + my $e = $instructions->[$i]; + push(@{$e}, $e->[1]); + push(@{$e}, $e->[2]); + my $addr = $e->[0]; + my $sym = $symbols->{$addr}; + if (defined($sym)) { + if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { + $e->[1] = $1; # File name + $e->[2] = $2; # Line number + } + } + } +} + +# Print source-listing for one routine +sub PrintSource { + my $prog = shift; + my $offset = shift; + my $routine = shift; + my $flat = shift; + my $cumulative = shift; + my $start_addr = shift; + my $end_addr = shift; + my $html = shift; + my $output = shift; + + # Disassemble all instructions (just to get line numbers) + my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); + GetTopLevelLineNumbers($prog, $offset, \@instructions); + + # Hack 1: assume that the first source file encountered in the + # disassembly contains the routine + my $filename = undef; + for (my $i = 0; $i <= $#instructions; $i++) { + if ($instructions[$i]->[2] >= 0) { + $filename = $instructions[$i]->[1]; + last; + } + } + if (!defined($filename)) { + print STDERR "no filename found in $routine\n"; + return 0; + } + + # Hack 2: assume that the largest line number from $filename is the + # end of the procedure. This is typically safe since if P1 contains + # an inlined call to P2, then P2 usually occurs earlier in the + # source file. If this does not work, we might have to compute a + # density profile or just print all regions we find. + my $lastline = 0; + for (my $i = 0; $i <= $#instructions; $i++) { + my $f = $instructions[$i]->[1]; + my $l = $instructions[$i]->[2]; + if (($f eq $filename) && ($l > $lastline)) { + $lastline = $l; + } + } + + # Hack 3: assume the first source location from "filename" is the start of + # the source code. + my $firstline = 1; + for (my $i = 0; $i <= $#instructions; $i++) { + if ($instructions[$i]->[1] eq $filename) { + $firstline = $instructions[$i]->[2]; + last; + } + } + + # Hack 4: Extend last line forward until its indentation is less than + # the indentation we saw on $firstline + my $oldlastline = $lastline; + { + if (!open(FILE, "<$filename")) { + print STDERR "$filename: $!\n"; + return 0; + } + my $l = 0; + my $first_indentation = -1; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + $l++; + my $indent = Indentation($_); + if ($l >= $firstline) { + if ($first_indentation < 0 && $indent >= 0) { + $first_indentation = $indent; + last if ($first_indentation == 0); + } + } + if ($l >= $lastline && $indent >= 0) { + if ($indent >= $first_indentation) { + $lastline = $l+1; + } else { + last; + } + } + } + close(FILE); + } + + # Assign all samples to the range $firstline,$lastline, + # Hack 4: If an instruction does not occur in the range, its samples + # are moved to the next instruction that occurs in the range. 
+ my $samples1 = {}; # Map from line number to flat count + my $samples2 = {}; # Map from line number to cumulative count + my $running1 = 0; # Unassigned flat counts + my $running2 = 0; # Unassigned cumulative counts + my $total1 = 0; # Total flat counts + my $total2 = 0; # Total cumulative counts + my %disasm = (); # Map from line number to disassembly + my $running_disasm = ""; # Unassigned disassembly + my $skip_marker = "---\n"; + if ($html) { + $skip_marker = ""; + for (my $l = $firstline; $l <= $lastline; $l++) { + $disasm{$l} = ""; + } + } + my $last_dis_filename = ''; + my $last_dis_linenum = -1; + my $last_touched_line = -1; # To detect gaps in disassembly for a line + foreach my $e (@instructions) { + # Add up counts for all address that fall inside this instruction + my $c1 = 0; + my $c2 = 0; + for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { + $c1 += GetEntry($flat, $a); + $c2 += GetEntry($cumulative, $a); + } + + if ($html) { + my $dis = sprintf(" %6s %6s \t\t%8s: %s ", + HtmlPrintNumber($c1), + HtmlPrintNumber($c2), + UnparseAddress($offset, $e->[0]), + CleanDisassembly($e->[3])); + + # Append the most specific source line associated with this instruction + if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; + $dis = HtmlEscape($dis); + my $f = $e->[5]; + my $l = $e->[6]; + if ($f ne $last_dis_filename) { + $dis .= sprintf("%s:%d", + HtmlEscape(CleanFileName($f)), $l); + } elsif ($l ne $last_dis_linenum) { + # De-emphasize the unchanged file name portion + $dis .= sprintf("%s" . + ":%d", + HtmlEscape(CleanFileName($f)), $l); + } else { + # De-emphasize the entire location + $dis .= sprintf("%s:%d", + HtmlEscape(CleanFileName($f)), $l); + } + $last_dis_filename = $f; + $last_dis_linenum = $l; + $running_disasm .= $dis; + $running_disasm .= "\n"; + } + + $running1 += $c1; + $running2 += $c2; + $total1 += $c1; + $total2 += $c2; + my $file = $e->[1]; + my $line = $e->[2]; + if (($file eq $filename) && + ($line >= $firstline) && + ($line <= $lastline)) { + # Assign all accumulated samples to this line + AddEntry($samples1, $line, $running1); + AddEntry($samples2, $line, $running2); + $running1 = 0; + $running2 = 0; + if ($html) { + if ($line != $last_touched_line && $disasm{$line} ne '') { + $disasm{$line} .= "\n"; + } + $disasm{$line} .= $running_disasm; + $running_disasm = ''; + $last_touched_line = $line; + } + } + } + + # Assign any leftover samples to $lastline + AddEntry($samples1, $lastline, $running1); + AddEntry($samples2, $lastline, $running2); + if ($html) { + if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { + $disasm{$lastline} .= "\n"; + } + $disasm{$lastline} .= $running_disasm; + } + + if ($html) { + printf $output ( + "

+      "<h1>%s</h1>%s\n<pre>\n" .
+      "Total:%6s %6s (flat / cumulative %s)\n",
+      HtmlEscape(ShortFunctionName($routine)),
+      HtmlEscape(CleanFileName($filename)),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  } else {
+    printf $output (
+      "ROUTINE ====================== %s in %s\n" .
+      "%6s %6s Total %s (flat / cumulative)\n",
+      ShortFunctionName($routine),
+      CleanFileName($filename),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  }
+  if (!open(FILE, "<$filename")) {
+    print STDERR "$filename: $!\n";
+    return 0;
+  }
+  my $l = 0;
+  while (<FILE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    $l++;
+    if ($l >= $firstline - 5 &&
+        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+      chop;
+      my $text = $_;
+      if ($l == $firstline) { print $output $skip_marker; }
+      my $n1 = GetEntry($samples1, $l);
+      my $n2 = GetEntry($samples2, $l);
+      if ($html) {
+        # Emit a span that has one of the following classes:
+        #    livesrc -- has samples
+        #    deadsrc -- has disassembly, but with no samples
+        #    nop     -- has no matching disassembly
+        # Also emit an optional span containing disassembly.
+        my $dis = $disasm{$l};
+        my $asm = "";
+        if (defined($dis) && $dis ne '') {
+          $asm = "" . $dis . "";
+        }
+        my $source_class = (($n1 + $n2 > 0)
+                            ? "livesrc"
+                            : (($asm ne "") ? "deadsrc" : "nop"));
+        printf $output (
+          "%5d " .
+          "%6s %6s %s%s\n",
+          $l, $source_class,
+          HtmlPrintNumber($n1),
+          HtmlPrintNumber($n2),
+          HtmlEscape($text),
+          $asm);
+      } else {
+        printf $output(
+          "%6s %6s %4d: %s\n",
+          UnparseAlt($n1),
+          UnparseAlt($n2),
+          $l,
+          $text);
+      }
+      if ($l == $lastline)  { print $output $skip_marker; }
+    };
+  }
+  close(FILE);
+  if ($html) {
+    print $output "</pre>
\n"; + } + return 1; +} + +# Return the source line for the specified file/linenumber. +# Returns undef if not found. +sub SourceLine { + my $file = shift; + my $line = shift; + + # Look in cache + if (!defined($main::source_cache{$file})) { + if (100 < scalar keys(%main::source_cache)) { + # Clear the cache when it gets too big + $main::source_cache = (); + } + + # Read all lines from the file + if (!open(FILE, "<$file")) { + print STDERR "$file: $!\n"; + $main::source_cache{$file} = []; # Cache the negative result + return undef; + } + my $lines = []; + push(@{$lines}, ""); # So we can use 1-based line numbers as indices + while () { + push(@{$lines}, $_); + } + close(FILE); + + # Save the lines in the cache + $main::source_cache{$file} = $lines; + } + + my $lines = $main::source_cache{$file}; + if (($line < 0) || ($line > $#{$lines})) { + return undef; + } else { + return $lines->[$line]; + } +} + +# Print disassembly for one routine with interspersed source if available +sub PrintDisassembledFunction { + my $prog = shift; + my $offset = shift; + my $routine = shift; + my $flat = shift; + my $cumulative = shift; + my $start_addr = shift; + my $end_addr = shift; + my $total = shift; + + # Disassemble all instructions + my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); + + # Make array of counts per instruction + my @flat_count = (); + my @cum_count = (); + my $flat_total = 0; + my $cum_total = 0; + foreach my $e (@instructions) { + # Add up counts for all address that fall inside this instruction + my $c1 = 0; + my $c2 = 0; + for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { + $c1 += GetEntry($flat, $a); + $c2 += GetEntry($cumulative, $a); + } + push(@flat_count, $c1); + push(@cum_count, $c2); + $flat_total += $c1; + $cum_total += $c2; + } + + # Print header with total counts + printf("ROUTINE ====================== %s\n" . + "%6s %6s %s (flat, cumulative) %.1f%% of total\n", + ShortFunctionName($routine), + Unparse($flat_total), + Unparse($cum_total), + Units(), + ($cum_total * 100.0) / $total); + + # Process instructions in order + my $current_file = ""; + for (my $i = 0; $i <= $#instructions; ) { + my $e = $instructions[$i]; + + # Print the new file name whenever we switch files + if ($e->[1] ne $current_file) { + $current_file = $e->[1]; + my $fname = $current_file; + $fname =~ s|^\./||; # Trim leading "./" + + # Shorten long file names + if (length($fname) >= 58) { + $fname = "..." . substr($fname, -55); + } + printf("-------------------- %s\n", $fname); + } + + # TODO: Compute range of lines to print together to deal with + # small reorderings. 
+ my $first_line = $e->[2]; + my $last_line = $first_line; + my %flat_sum = (); + my %cum_sum = (); + for (my $l = $first_line; $l <= $last_line; $l++) { + $flat_sum{$l} = 0; + $cum_sum{$l} = 0; + } + + # Find run of instructions for this range of source lines + my $first_inst = $i; + while (($i <= $#instructions) && + ($instructions[$i]->[2] >= $first_line) && + ($instructions[$i]->[2] <= $last_line)) { + $e = $instructions[$i]; + $flat_sum{$e->[2]} += $flat_count[$i]; + $cum_sum{$e->[2]} += $cum_count[$i]; + $i++; + } + my $last_inst = $i - 1; + + # Print source lines + for (my $l = $first_line; $l <= $last_line; $l++) { + my $line = SourceLine($current_file, $l); + if (!defined($line)) { + $line = "?\n"; + next; + } else { + $line =~ s/^\s+//; + } + printf("%6s %6s %5d: %s", + UnparseAlt($flat_sum{$l}), + UnparseAlt($cum_sum{$l}), + $l, + $line); + } + + # Print disassembly + for (my $x = $first_inst; $x <= $last_inst; $x++) { + my $e = $instructions[$x]; + printf("%6s %6s %8s: %6s\n", + UnparseAlt($flat_count[$x]), + UnparseAlt($cum_count[$x]), + UnparseAddress($offset, $e->[0]), + CleanDisassembly($e->[3])); + } + } +} + +# Print DOT graph +sub PrintDot { + my $prog = shift; + my $symbols = shift; + my $raw = shift; + my $flat = shift; + my $cumulative = shift; + my $overall_total = shift; + + # Get total + my $local_total = TotalProfile($flat); + my $nodelimit = int($main::opt_nodefraction * $local_total); + my $edgelimit = int($main::opt_edgefraction * $local_total); + my $nodecount = $main::opt_nodecount; + + # Find nodes to include + my @list = (sort { abs(GetEntry($cumulative, $b)) <=> + abs(GetEntry($cumulative, $a)) + || $a cmp $b } + keys(%{$cumulative})); + my $last = $nodecount - 1; + if ($last > $#list) { + $last = $#list; + } + while (($last >= 0) && + (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { + $last--; + } + if ($last < 0) { + print STDERR "No nodes to print\n"; + return 0; + } + + if ($nodelimit > 0 || $edgelimit > 0) { + printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", + Unparse($nodelimit), Units(), + Unparse($edgelimit), Units()); + } + + # Open DOT output file + my $output; + my $escaped_dot = ShellEscape(@DOT); + my $escaped_ps2pdf = ShellEscape(@PS2PDF); + if ($main::opt_gv) { + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); + $output = "| $escaped_dot -Tps2 >$escaped_outfile"; + } elsif ($main::opt_evince) { + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); + $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; + } elsif ($main::opt_ps) { + $output = "| $escaped_dot -Tps2"; + } elsif ($main::opt_pdf) { + $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; + } elsif ($main::opt_web || $main::opt_svg) { + # We need to post-process the SVG, so write to a temporary file always. + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); + $output = "| $escaped_dot -Tsvg >$escaped_outfile"; + } elsif ($main::opt_gif) { + $output = "| $escaped_dot -Tgif"; + } else { + $output = ">&STDOUT"; + } + open(DOT, $output) || error("$output: $!\n"); + + # Title + printf DOT ("digraph \"%s; %s %s\" {\n", + $prog, + Unparse($overall_total), + Units()); + if ($main::opt_pdf) { + # The output is more printable if we set the page size for dot. + printf DOT ("size=\"8,11\"\n"); + } + printf DOT ("node [width=0.375,height=0.25];\n"); + + # Print legend + printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
+ "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", + $prog, + sprintf("Total %s: %s", Units(), Unparse($overall_total)), + sprintf("Focusing on: %s", Unparse($local_total)), + sprintf("Dropped nodes with <= %s abs(%s)", + Unparse($nodelimit), Units()), + sprintf("Dropped edges with <= %s %s", + Unparse($edgelimit), Units()) + ); + + # Print nodes + my %node = (); + my $nextnode = 1; + foreach my $a (@list[0..$last]) { + # Pick font size + my $f = GetEntry($flat, $a); + my $c = GetEntry($cumulative, $a); + + my $fs = 8; + if ($local_total > 0) { + $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); + } + + $node{$a} = $nextnode++; + my $sym = $a; + $sym =~ s/\s+/\\n/g; + $sym =~ s/::/\\n/g; + + # Extra cumulative info to print for non-leaves + my $extra = ""; + if ($f != $c) { + $extra = sprintf("\\rof %s (%s)", + Unparse($c), + Percent($c, $local_total)); + } + my $style = ""; + if ($main::opt_heapcheck) { + if ($f > 0) { + # make leak-causing nodes more visible (add a background) + $style = ",style=filled,fillcolor=gray" + } elsif ($f < 0) { + # make anti-leak-causing nodes (which almost never occur) + # stand out as well (triple border) + $style = ",peripheries=3" + } + } + + printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . + "\",shape=box,fontsize=%.1f%s];\n", + $node{$a}, + $sym, + Unparse($f), + Percent($f, $local_total), + $extra, + $fs, + $style, + ); + } + + # Get edges and counts per edge + my %edge = (); + my $n; + my $fullname_to_shortname_map = {}; + FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); + foreach my $k (keys(%{$raw})) { + # TODO: omit low %age edges + $n = $raw->{$k}; + my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); + for (my $i = 1; $i <= $#translated; $i++) { + my $src = $translated[$i]; + my $dst = $translated[$i-1]; + #next if ($src eq $dst); # Avoid self-edges? + if (exists($node{$src}) && exists($node{$dst})) { + my $edge_label = "$src\001$dst"; + if (!exists($edge{$edge_label})) { + $edge{$edge_label} = 0; + } + $edge{$edge_label} += $n; + } + } + } + + # Print edges (process in order of decreasing counts) + my %indegree = (); # Number of incoming edges added per node so far + my %outdegree = (); # Number of outgoing edges added per node so far + foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { + my @x = split(/\001/, $e); + $n = $edge{$e}; + + # Initialize degree of kept incoming and outgoing edges if necessary + my $src = $x[0]; + my $dst = $x[1]; + if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } + if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } + + my $keep; + if ($indegree{$dst} == 0) { + # Keep edge if needed for reachability + $keep = 1; + } elsif (abs($n) <= $edgelimit) { + # Drop if we are below --edgefraction + $keep = 0; + } elsif ($outdegree{$src} >= $main::opt_maxdegree || + $indegree{$dst} >= $main::opt_maxdegree) { + # Keep limited number of in/out edges per node + $keep = 0; + } else { + $keep = 1; + } + + if ($keep) { + $outdegree{$src}++; + $indegree{$dst}++; + + # Compute line width based on edge count + my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); + if ($fraction > 1) { $fraction = 1; } + my $w = $fraction * 2; + if ($w < 1 && ($main::opt_web || $main::opt_svg)) { + # SVG output treats line widths < 1 poorly. 
+ $w = 1; + } + + # Dot sometimes segfaults if given edge weights that are too large, so + # we cap the weights at a large value + my $edgeweight = abs($n) ** 0.7; + if ($edgeweight > 100000) { $edgeweight = 100000; } + $edgeweight = int($edgeweight); + + my $style = sprintf("setlinewidth(%f)", $w); + if ($x[1] =~ m/\(inline\)/) { + $style .= ",dashed"; + } + + # Use a slightly squashed function of the edge count as the weight + printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n", + $node{$x[0]}, + $node{$x[1]}, + Unparse($n), + $edgeweight, + $style); + } + } + + print DOT ("}\n"); + close(DOT); + + if ($main::opt_web || $main::opt_svg) { + # Rewrite SVG to be more usable inside web browser. + RewriteSvg(TempName($main::next_tmpfile, "svg")); + } + + return 1; +} + +sub RewriteSvg { + my $svgfile = shift; + + open(SVG, $svgfile) || die "open temp svg: $!"; + my @svg = ; + close(SVG); + unlink $svgfile; + my $svg = join('', @svg); + + # Dot's SVG output is + # + # + # + # ... + # + # + # + # Change it to + # + # + # $svg_javascript + # + # + # ... + # + # + # + + # Fix width, height; drop viewBox. + $svg =~ s/(?s) above first + my $svg_javascript = SvgJavascript(); + my $viewport = "\n"; + $svg =~ s/ above . + $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; + $svg =~ s/$svgfile") || die "open $svgfile: $!"; + print SVG $svg; + close(SVG); + } +} + +sub SvgJavascript { + return <<'EOF'; + +EOF +} + +# Provides a map from fullname to shortname for cases where the +# shortname is ambiguous. The symlist has both the fullname and +# shortname for all symbols, which is usually fine, but sometimes -- +# such as overloaded functions -- two different fullnames can map to +# the same shortname. In that case, we use the address of the +# function to disambiguate the two. This function fills in a map that +# maps fullnames to modified shortnames in such cases. If a fullname +# is not present in the map, the 'normal' shortname provided by the +# symlist is the appropriate one to use. +sub FillFullnameToShortnameMap { + my $symbols = shift; + my $fullname_to_shortname_map = shift; + my $shortnames_seen_once = {}; + my $shortnames_seen_more_than_once = {}; + + foreach my $symlist (values(%{$symbols})) { + # TODO(csilvers): deal with inlined symbols too. + my $shortname = $symlist->[0]; + my $fullname = $symlist->[2]; + if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address + next; # the only collisions we care about are when addresses differ + } + if (defined($shortnames_seen_once->{$shortname}) && + $shortnames_seen_once->{$shortname} ne $fullname) { + $shortnames_seen_more_than_once->{$shortname} = 1; + } else { + $shortnames_seen_once->{$shortname} = $fullname; + } + } + + foreach my $symlist (values(%{$symbols})) { + my $shortname = $symlist->[0]; + my $fullname = $symlist->[2]; + # TODO(csilvers): take in a list of addresses we care about, and only + # store in the map if $symlist->[1] is in that list. Saves space. + next if defined($fullname_to_shortname_map->{$fullname}); + if (defined($shortnames_seen_more_than_once->{$shortname})) { + if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it + $fullname_to_shortname_map->{$fullname} = "$shortname\@$1"; + } + } + } +} + +# Return a small number that identifies the argument. +# Multiple calls with the same argument will return the same number. +# Calls with different arguments will return different numbers. 
+sub ShortIdFor { + my $key = shift; + my $id = $main::uniqueid{$key}; + if (!defined($id)) { + $id = keys(%main::uniqueid) + 1; + $main::uniqueid{$key} = $id; + } + return $id; +} + +# Translate a stack of addresses into a stack of symbols +sub TranslateStack { + my $symbols = shift; + my $fullname_to_shortname_map = shift; + my $k = shift; + + my @addrs = split(/\n/, $k); + my @result = (); + for (my $i = 0; $i <= $#addrs; $i++) { + my $a = $addrs[$i]; + + # Skip large addresses since they sometimes show up as fake entries on RH9 + if (length($a) > 8 && $a gt "7fffffffffffffff") { + next; + } + + if ($main::opt_disasm || $main::opt_list) { + # We want just the address for the key + push(@result, $a); + next; + } + + my $symlist = $symbols->{$a}; + if (!defined($symlist)) { + $symlist = [$a, "", $a]; + } + + # We can have a sequence of symbols for a particular entry + # (more than one symbol in the case of inlining). Callers + # come before callees in symlist, so walk backwards since + # the translated stack should contain callees before callers. + for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { + my $func = $symlist->[$j-2]; + my $fileline = $symlist->[$j-1]; + my $fullfunc = $symlist->[$j]; + if (defined($fullname_to_shortname_map->{$fullfunc})) { + $func = $fullname_to_shortname_map->{$fullfunc}; + } + if ($j > 2) { + $func = "$func (inline)"; + } + + # Do not merge nodes corresponding to Callback::Run since that + # causes confusing cycles in dot display. Instead, we synthesize + # a unique name for this frame per caller. + if ($func =~ m/Callback.*::Run$/) { + my $caller = ($i > 0) ? $addrs[$i-1] : 0; + $func = "Run#" . ShortIdFor($caller); + } + + if ($main::opt_addresses) { + push(@result, "$a $func $fileline"); + } elsif ($main::opt_lines) { + if ($func eq '??' && $fileline eq '??:0') { + push(@result, "$a"); + } else { + push(@result, "$func $fileline"); + } + } elsif ($main::opt_functions) { + if ($func eq '??') { + push(@result, "$a"); + } else { + push(@result, $func); + } + } elsif ($main::opt_files) { + if ($fileline eq '??:0' || $fileline eq '') { + push(@result, "$a"); + } else { + my $f = $fileline; + $f =~ s/:\d+$//; + push(@result, $f); + } + } else { + push(@result, $a); + last; # Do not print inlined info + } + } + } + + # print join(",", @addrs), " => ", join(",", @result), "\n"; + return @result; +} + +# Generate percent string for a number and a total +sub Percent { + my $num = shift; + my $tot = shift; + if ($tot != 0) { + return sprintf("%.1f%%", $num * 100.0 / $tot); + } else { + return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); + } +} + +# Generate pretty-printed form of number +sub Unparse { + my $num = shift; + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + if ($main::opt_inuse_objects || $main::opt_alloc_objects) { + return sprintf("%d", $num); + } else { + if ($main::opt_show_bytes) { + return sprintf("%d", $num); + } else { + return sprintf("%.1f", $num / 1048576.0); + } + } + } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { + return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds + } else { + return sprintf("%d", $num); + } +} + +# Alternate pretty-printed form: 0 maps to "." 
+sub UnparseAlt { + my $num = shift; + if ($num == 0) { + return "."; + } else { + return Unparse($num); + } +} + +# Alternate pretty-printed form: 0 maps to "" +sub HtmlPrintNumber { + my $num = shift; + if ($num == 0) { + return ""; + } else { + return Unparse($num); + } +} + +# Return output units +sub Units { + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + if ($main::opt_inuse_objects || $main::opt_alloc_objects) { + return "objects"; + } else { + if ($main::opt_show_bytes) { + return "B"; + } else { + return "MB"; + } + } + } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { + return "seconds"; + } else { + return "samples"; + } +} + +##### Profile manipulation code ##### + +# Generate flattened profile: +# If count is charged to stack [a,b,c,d], in generated profile, +# it will be charged to [a] +sub FlatProfile { + my $profile = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + if ($#addrs >= 0) { + AddEntry($result, $addrs[0], $count); + } + } + return $result; +} + +# Generate cumulative profile: +# If count is charged to stack [a,b,c,d], in generated profile, +# it will be charged to [a], [b], [c], [d] +sub CumulativeProfile { + my $profile = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + foreach my $a (@addrs) { + AddEntry($result, $a, $count); + } + } + return $result; +} + +# If the second-youngest PC on the stack is always the same, returns +# that pc. Otherwise, returns undef. +sub IsSecondPcAlwaysTheSame { + my $profile = shift; + + my $second_pc = undef; + foreach my $k (keys(%{$profile})) { + my @addrs = split(/\n/, $k); + if ($#addrs < 1) { + return undef; + } + if (not defined $second_pc) { + $second_pc = $addrs[1]; + } else { + if ($second_pc ne $addrs[1]) { + return undef; + } + } + } + return $second_pc; +} + +sub ExtractSymbolLocation { + my $symbols = shift; + my $address = shift; + # 'addr2line' outputs "??:0" for unknown locations; we do the + # same to be consistent. + my $location = "??:0:unknown"; + if (exists $symbols->{$address}) { + my $file = $symbols->{$address}->[1]; + if ($file eq "?") { + $file = "??:0" + } + $location = $file . ":" . $symbols->{$address}->[0]; + } + return $location; +} + +# Extracts a graph of calls. 
+sub ExtractCalls { + my $symbols = shift; + my $profile = shift; + + my $calls = {}; + while( my ($stack_trace, $count) = each %$profile ) { + my @address = split(/\n/, $stack_trace); + my $destination = ExtractSymbolLocation($symbols, $address[0]); + AddEntry($calls, $destination, $count); + for (my $i = 1; $i <= $#address; $i++) { + my $source = ExtractSymbolLocation($symbols, $address[$i]); + my $call = "$source -> $destination"; + AddEntry($calls, $call, $count); + $destination = $source; + } + } + + return $calls; +} + +sub FilterFrames { + my $symbols = shift; + my $profile = shift; + + if ($main::opt_retain eq '' && $main::opt_exclude eq '') { + return $profile; + } + + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my @path = (); + foreach my $a (@addrs) { + my $sym; + if (exists($symbols->{$a})) { + $sym = $symbols->{$a}->[0]; + } else { + $sym = $a; + } + if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) { + next; + } + if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) { + next; + } + push(@path, $a); + } + if (scalar(@path) > 0) { + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + } + + return $result; +} + +sub RemoveUninterestingFrames { + my $symbols = shift; + my $profile = shift; + + # List of function names to skip + my %skip = (); + my $skip_regexp = 'NOMATCH'; + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + foreach my $name ('@JEMALLOC_PREFIX@calloc', + 'cfree', + '@JEMALLOC_PREFIX@malloc', + 'newImpl', + 'void* newImpl', + '@JEMALLOC_PREFIX@free', + '@JEMALLOC_PREFIX@memalign', + '@JEMALLOC_PREFIX@posix_memalign', + '@JEMALLOC_PREFIX@aligned_alloc', + 'pvalloc', + '@JEMALLOC_PREFIX@valloc', + '@JEMALLOC_PREFIX@realloc', + '@JEMALLOC_PREFIX@mallocx', + '@JEMALLOC_PREFIX@rallocx', + '@JEMALLOC_PREFIX@xallocx', + '@JEMALLOC_PREFIX@dallocx', + '@JEMALLOC_PREFIX@sdallocx', + '@JEMALLOC_PREFIX@sdallocx_noflags', + 'tc_calloc', + 'tc_cfree', + 'tc_malloc', + 'tc_free', + 'tc_memalign', + 'tc_posix_memalign', + 'tc_pvalloc', + 'tc_valloc', + 'tc_realloc', + 'tc_new', + 'tc_delete', + 'tc_newarray', + 'tc_deletearray', + 'tc_new_nothrow', + 'tc_newarray_nothrow', + 'do_malloc', + '::do_malloc', # new name -- got moved to an unnamed ns + '::do_malloc_or_cpp_alloc', + 'DoSampledAllocation', + 'simple_alloc::allocate', + '__malloc_alloc_template::allocate', + '__builtin_delete', + '__builtin_new', + '__builtin_vec_delete', + '__builtin_vec_new', + 'operator new', + 'operator new[]', + # The entry to our memory-allocation routines on OS X + 'malloc_zone_malloc', + 'malloc_zone_calloc', + 'malloc_zone_valloc', + 'malloc_zone_realloc', + 'malloc_zone_memalign', + 'malloc_zone_free', + # These mark the beginning/end of our custom sections + '__start_google_malloc', + '__stop_google_malloc', + '__start_malloc_hook', + '__stop_malloc_hook') { + $skip{$name} = 1; + $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything + } + # TODO: Remove TCMalloc once everything has been + # moved into the tcmalloc:: namespace and we have flushed + # old code out of the system. 
+ $skip_regexp = "TCMalloc|^tcmalloc::"; + } elsif ($main::profile_type eq 'contention') { + foreach my $vname ('base::RecordLockProfileData', + 'base::SubmitMutexProfileData', + 'base::SubmitSpinLockProfileData', + 'Mutex::Unlock', + 'Mutex::UnlockSlow', + 'Mutex::ReaderUnlock', + 'MutexLock::~MutexLock', + 'SpinLock::Unlock', + 'SpinLock::SlowUnlock', + 'SpinLockHolder::~SpinLockHolder') { + $skip{$vname} = 1; + } + } elsif ($main::profile_type eq 'cpu') { + # Drop signal handlers used for CPU profile collection + # TODO(dpeng): this should not be necessary; it's taken + # care of by the general 2nd-pc mechanism below. + foreach my $name ('ProfileData::Add', # historical + 'ProfileData::prof_handler', # historical + 'CpuProfiler::prof_handler', + '__FRAME_END__', + '__pthread_sighandler', + '__restore') { + $skip{$name} = 1; + } + } else { + # Nothing skipped for unknown types + } + + if ($main::profile_type eq 'cpu') { + # If all the second-youngest program counters are the same, + # this STRONGLY suggests that it is an artifact of measurement, + # i.e., stack frames pushed by the CPU profiler signal handler. + # Hence, we delete them. + # (The topmost PC is read from the signal structure, not from + # the stack, so it does not get involved.) + while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { + my $result = {}; + my $func = ''; + if (exists($symbols->{$second_pc})) { + $second_pc = $symbols->{$second_pc}->[0]; + } + print STDERR "Removing $second_pc from all stack traces.\n"; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + splice @addrs, 1, 1; + my $reduced_path = join("\n", @addrs); + AddEntry($result, $reduced_path, $count); + } + $profile = $result; + } + } + + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my @path = (); + foreach my $a (@addrs) { + if (exists($symbols->{$a})) { + my $func = $symbols->{$a}->[0]; + if ($skip{$func} || ($func =~ m/$skip_regexp/)) { + # Throw away the portion of the backtrace seen so far, under the + # assumption that previous frames were for functions internal to the + # allocator. + @path = (); + next; + } + } + push(@path, $a); + } + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + + $result = FilterFrames($symbols, $result); + + return $result; +} + +# Reduce profile to granularity given by user +sub ReduceProfile { + my $symbols = shift; + my $profile = shift; + my $result = {}; + my $fullname_to_shortname_map = {}; + FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); + my @path = (); + my %seen = (); + $seen{''} = 1; # So that empty keys are skipped + foreach my $e (@translated) { + # To avoid double-counting due to recursion, skip a stack-trace + # entry if it has already been seen + if (!$seen{$e}) { + $seen{$e} = 1; + push(@path, $e); + } + } + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + return $result; +} + +# Does the specified symbol array match the regexp? 
+sub SymbolMatches { + my $sym = shift; + my $re = shift; + if (defined($sym)) { + for (my $i = 0; $i < $#{$sym}; $i += 3) { + if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { + return 1; + } + } + } + return 0; +} + +# Focus only on paths involving specified regexps +sub FocusProfile { + my $symbols = shift; + my $profile = shift; + my $focus = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + foreach my $a (@addrs) { + # Reply if it matches either the address/shortname/fileline + if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { + AddEntry($result, $k, $count); + last; + } + } + } + return $result; +} + +# Focus only on paths not involving specified regexps +sub IgnoreProfile { + my $symbols = shift; + my $profile = shift; + my $ignore = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my $matched = 0; + foreach my $a (@addrs) { + # Reply if it matches either the address/shortname/fileline + if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { + $matched = 1; + last; + } + } + if (!$matched) { + AddEntry($result, $k, $count); + } + } + return $result; +} + +# Get total count in profile +sub TotalProfile { + my $profile = shift; + my $result = 0; + foreach my $k (keys(%{$profile})) { + $result += $profile->{$k}; + } + return $result; +} + +# Add A to B +sub AddProfile { + my $A = shift; + my $B = shift; + + my $R = {}; + # add all keys in A + foreach my $k (keys(%{$A})) { + my $v = $A->{$k}; + AddEntry($R, $k, $v); + } + # add all keys in B + foreach my $k (keys(%{$B})) { + my $v = $B->{$k}; + AddEntry($R, $k, $v); + } + return $R; +} + +# Merges symbol maps +sub MergeSymbols { + my $A = shift; + my $B = shift; + + my $R = {}; + foreach my $k (keys(%{$A})) { + $R->{$k} = $A->{$k}; + } + if (defined($B)) { + foreach my $k (keys(%{$B})) { + $R->{$k} = $B->{$k}; + } + } + return $R; +} + + +# Add A to B +sub AddPcs { + my $A = shift; + my $B = shift; + + my $R = {}; + # add all keys in A + foreach my $k (keys(%{$A})) { + $R->{$k} = 1 + } + # add all keys in B + foreach my $k (keys(%{$B})) { + $R->{$k} = 1 + } + return $R; +} + +# Subtract B from A +sub SubtractProfile { + my $A = shift; + my $B = shift; + + my $R = {}; + foreach my $k (keys(%{$A})) { + my $v = $A->{$k} - GetEntry($B, $k); + if ($v < 0 && $main::opt_drop_negative) { + $v = 0; + } + AddEntry($R, $k, $v); + } + if (!$main::opt_drop_negative) { + # Take care of when subtracted profile has more entries + foreach my $k (keys(%{$B})) { + if (!exists($A->{$k})) { + AddEntry($R, $k, 0 - $B->{$k}); + } + } + } + return $R; +} + +# Get entry from profile; zero if not present +sub GetEntry { + my $profile = shift; + my $k = shift; + if (exists($profile->{$k})) { + return $profile->{$k}; + } else { + return 0; + } +} + +# Add entry to specified profile +sub AddEntry { + my $profile = shift; + my $k = shift; + my $n = shift; + if (!exists($profile->{$k})) { + $profile->{$k} = 0; + } + $profile->{$k} += $n; +} + +# Add a stack of entries to specified profile, and add them to the $pcs +# list. 
+sub AddEntries { + my $profile = shift; + my $pcs = shift; + my $stack = shift; + my $count = shift; + my @k = (); + + foreach my $e (split(/\s+/, $stack)) { + my $pc = HexExtend($e); + $pcs->{$pc} = 1; + push @k, $pc; + } + AddEntry($profile, (join "\n", @k), $count); +} + +##### Code to profile a server dynamically ##### + +sub CheckSymbolPage { + my $url = SymbolPageURL(); + my $command = ShellEscape(@URL_FETCHER, $url); + open(SYMBOL, "$command |") or error($command); + my $line = ; + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + close(SYMBOL); + unless (defined($line)) { + error("$url doesn't exist\n"); + } + + if ($line =~ /^num_symbols:\s+(\d+)$/) { + if ($1 == 0) { + error("Stripped binary. No symbols available.\n"); + } + } else { + error("Failed to get the number of symbols from $url\n"); + } +} + +sub IsProfileURL { + my $profile_name = shift; + if (-f $profile_name) { + printf STDERR "Using local file $profile_name.\n"; + return 0; + } + return 1; +} + +sub ParseProfileURL { + my $profile_name = shift; + + if (!defined($profile_name) || $profile_name eq "") { + return (); + } + + # Split profile URL - matches all non-empty strings, so no test. + $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,; + + my $proto = $1 || "http://"; + my $hostport = $2; + my $prefix = $3; + my $profile = $4 || "/"; + + my $host = $hostport; + $host =~ s/:.*//; + + my $baseurl = "$proto$hostport$prefix"; + return ($host, $baseurl, $profile); +} + +# We fetch symbols from the first profile argument. +sub SymbolPageURL { + my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); + return "$baseURL$SYMBOL_PAGE"; +} + +sub FetchProgramName() { + my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); + my $url = "$baseURL$PROGRAM_NAME_PAGE"; + my $command_line = ShellEscape(@URL_FETCHER, $url); + open(CMDLINE, "$command_line |") or error($command_line); + my $cmdline = ; + $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines + close(CMDLINE); + error("Failed to get program name from $url\n") unless defined($cmdline); + $cmdline =~ s/\x00.+//; # Remove argv[1] and latters. + $cmdline =~ s!\n!!g; # Remove LFs. + return $cmdline; +} + +# Gee, curl's -L (--location) option isn't reliable at least +# with its 7.12.3 version. Curl will forget to post data if +# there is a redirection. This function is a workaround for +# curl. Redirection happens on borg hosts. +sub ResolveRedirectionForCurl { + my $url = shift; + my $command_line = ShellEscape(@URL_FETCHER, "--head", $url); + open(CMDLINE, "$command_line |") or error($command_line); + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (/^Location: (.*)/) { + $url = $1; + } + } + close(CMDLINE); + return $url; +} + +# Add a timeout flat to URL_FETCHER. Returns a new list. +sub AddFetchTimeout { + my $timeout = shift; + my @fetcher = @_; + if (defined($timeout)) { + if (join(" ", @fetcher) =~ m/\bcurl -s/) { + push(@fetcher, "--max-time", sprintf("%d", $timeout)); + } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) { + push(@fetcher, sprintf("--deadline=%d", $timeout)); + } + } + return @fetcher; +} + +# Reads a symbol map from the file handle name given as $1, returning +# the resulting symbol map. Also processes variables relating to symbols. +# Currently, the only variable processed is 'binary=' which updates +# $main::prog to have the correct program name. 
+sub ReadSymbols { + my $in = shift; + my $map = {}; + while (<$in>) { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Removes all the leading zeroes from the symbols, see comment below. + if (m/^0x0*([0-9a-f]+)\s+(.+)/) { + $map->{$1} = $2; + } elsif (m/^---/) { + last; + } elsif (m/^([a-z][^=]*)=(.*)$/ ) { + my ($variable, $value) = ($1, $2); + for ($variable, $value) { + s/^\s+//; + s/\s+$//; + } + if ($variable eq "binary") { + if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { + printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", + $main::prog, $value); + } + $main::prog = $value; + } else { + printf STDERR ("Ignoring unknown variable in symbols list: " . + "'%s' = '%s'\n", $variable, $value); + } + } + } + return $map; +} + +sub URLEncode { + my $str = shift; + $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg; + return $str; +} + +sub AppendSymbolFilterParams { + my $url = shift; + my @params = (); + if ($main::opt_retain ne '') { + push(@params, sprintf("retain=%s", URLEncode($main::opt_retain))); + } + if ($main::opt_exclude ne '') { + push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude))); + } + if (scalar @params > 0) { + $url = sprintf("%s?%s", $url, join("&", @params)); + } + return $url; +} + +# Fetches and processes symbols to prepare them for use in the profile output +# code. If the optional 'symbol_map' arg is not given, fetches symbols from +# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols +# are assumed to have already been fetched into 'symbol_map' and are simply +# extracted and processed. +sub FetchSymbols { + my $pcset = shift; + my $symbol_map = shift; + + my %seen = (); + my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq + + if (!defined($symbol_map)) { + my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); + + open(POSTFILE, ">$main::tmpfile_sym"); + print POSTFILE $post_data; + close(POSTFILE); + + my $url = SymbolPageURL(); + + my $command_line; + if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { + $url = ResolveRedirectionForCurl($url); + $url = AppendSymbolFilterParams($url); + $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", + $url); + } else { + $url = AppendSymbolFilterParams($url); + $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) + . " < " . ShellEscape($main::tmpfile_sym)); + } + # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. + my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); + open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); + $symbol_map = ReadSymbols(*SYMBOL{IO}); + close(SYMBOL); + } + + my $symbols = {}; + foreach my $pc (@pcs) { + my $fullname; + # For 64 bits binaries, symbols are extracted with 8 leading zeroes. + # Then /symbol reads the long symbols in as uint64, and outputs + # the result with a "0x%08llx" format which get rid of the zeroes. + # By removing all the leading zeroes in both $pc and the symbols from + # /symbol, the symbols match and are retrievable from the map. + my $shortpc = $pc; + $shortpc =~ s/^0*//; + # Each line may have a list of names, which includes the function + # and also other functions it has inlined. They are separated (in + # PrintSymbolizedProfile), by --, which is illegal in function names. + my $fullnames; + if (defined($symbol_map->{$shortpc})) { + $fullnames = $symbol_map->{$shortpc}; + } else { + $fullnames = "0x" . 
$pc; # Just use addresses + } + my $sym = []; + $symbols->{$pc} = $sym; + foreach my $fullname (split("--", $fullnames)) { + my $name = ShortFunctionName($fullname); + push(@{$sym}, $name, "?", $fullname); + } + } + return $symbols; +} + +sub BaseName { + my $file_name = shift; + $file_name =~ s!^.*/!!; # Remove directory name + return $file_name; +} + +sub MakeProfileBaseName { + my ($binary_name, $profile_name) = @_; + my ($host, $baseURL, $path) = ParseProfileURL($profile_name); + my $binary_shortname = BaseName($binary_name); + return sprintf("%s.%s.%s", + $binary_shortname, $main::op_time, $host); +} + +sub FetchDynamicProfile { + my $binary_name = shift; + my $profile_name = shift; + my $fetch_name_only = shift; + my $encourage_patience = shift; + + if (!IsProfileURL($profile_name)) { + return $profile_name; + } else { + my ($host, $baseURL, $path) = ParseProfileURL($profile_name); + if ($path eq "" || $path eq "/") { + # Missing type specifier defaults to cpu-profile + $path = $PROFILE_PAGE; + } + + my $profile_file = MakeProfileBaseName($binary_name, $profile_name); + + my $url = "$baseURL$path"; + my $fetch_timeout = undef; + if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { + if ($path =~ m/[?]/) { + $url .= "&"; + } else { + $url .= "?"; + } + $url .= sprintf("seconds=%d", $main::opt_seconds); + $fetch_timeout = $main::opt_seconds * 1.01 + 60; + # Set $profile_type for consumption by PrintSymbolizedProfile. + $main::profile_type = 'cpu'; + } else { + # For non-CPU profiles, we add a type-extension to + # the target profile file name. + my $suffix = $path; + $suffix =~ s,/,.,g; + $profile_file .= $suffix; + # Set $profile_type for consumption by PrintSymbolizedProfile. + if ($path =~ m/$HEAP_PAGE/) { + $main::profile_type = 'heap'; + } elsif ($path =~ m/$GROWTH_PAGE/) { + $main::profile_type = 'growth'; + } elsif ($path =~ m/$CONTENTION_PAGE/) { + $main::profile_type = 'contention'; + } + } + + my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); + if (! -d $profile_dir) { + mkdir($profile_dir) + || die("Unable to create profile directory $profile_dir: $!\n"); + } + my $tmp_profile = "$profile_dir/.tmp.$profile_file"; + my $real_profile = "$profile_dir/$profile_file"; + + if ($fetch_name_only > 0) { + return $real_profile; + } + + my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); + my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); + if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ + print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; + if ($encourage_patience) { + print STDERR "Be patient...\n"; + } + } else { + print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; + } + + (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); + (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); + print STDERR "Wrote profile to $real_profile\n"; + $main::collected_profile = $real_profile; + return $main::collected_profile; + } +} + +# Collect profiles in parallel +sub FetchDynamicProfiles { + my $items = scalar(@main::pfile_args); + my $levels = log($items) / log(2); + + if ($items == 1) { + $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); + } else { + # math rounding issues + if ((2 ** $levels) < $items) { + $levels++; + } + my $count = scalar(@main::pfile_args); + for (my $i = 0; $i < $count; $i++) { + $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); + } + print STDERR "Fetching $count profiles, Be patient...\n"; + FetchDynamicProfilesRecurse($levels, 0, 0); + $main::collected_profile = join(" \\\n ", @main::profile_files); + } +} + +# Recursively fork a process to get enough processes +# collecting profiles +sub FetchDynamicProfilesRecurse { + my $maxlevel = shift; + my $level = shift; + my $position = shift; + + if (my $pid = fork()) { + $position = 0 | ($position << 1); + TryCollectProfile($maxlevel, $level, $position); + wait; + } else { + $position = 1 | ($position << 1); + TryCollectProfile($maxlevel, $level, $position); + cleanup(); + exit(0); + } +} + +# Collect a single profile +sub TryCollectProfile { + my $maxlevel = shift; + my $level = shift; + my $position = shift; + + if ($level >= ($maxlevel - 1)) { + if ($position < scalar(@main::pfile_args)) { + FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); + } + } else { + FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); + } +} + +##### Parsing code ##### + +# Provide a small streaming-read module to handle very large +# cpu-profile files. Stream in chunks along a sliding window. +# Provides an interface to get one 'slot', correctly handling +# endian-ness differences. A slot is one 32-bit or 64-bit word +# (depending on the input profile). We tell endianness and bit-size +# for the profile by looking at the first 8 bytes: in cpu profiles, +# the second slot is always 3 (we'll accept anything that's not 0). +BEGIN { + package CpuProfileStream; + + sub new { + my ($class, $file, $fname) = @_; + my $self = { file => $file, + base => 0, + stride => 512 * 1024, # must be a multiple of bitsize/8 + slots => [], + unpack_code => "", # N for big-endian, V for little + perl_is_64bit => 1, # matters if profile is 64-bit + }; + bless $self, $class; + # Let unittests adjust the stride + if ($main::opt_test_stride > 0) { + $self->{stride} = $main::opt_test_stride; + } + # Read the first two slots to figure out bitsize and endianness. + my $slots = $self->{slots}; + my $str; + read($self->{file}, $str, 8); + # Set the global $address_length based on what we see here. + # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). + $address_length = ($str eq (chr(0)x8)) ? 16 : 8; + if ($address_length == 8) { + if (substr($str, 6, 2) eq chr(0)x2) { + $self->{unpack_code} = 'V'; # Little-endian. 
+ } elsif (substr($str, 4, 2) eq chr(0)x2) { + $self->{unpack_code} = 'N'; # Big-endian + } else { + ::error("$fname: header size >= 2**16\n"); + } + @$slots = unpack($self->{unpack_code} . "*", $str); + } else { + # If we're a 64-bit profile, check if we're a 64-bit-capable + # perl. Otherwise, each slot will be represented as a float + # instead of an int64, losing precision and making all the + # 64-bit addresses wrong. We won't complain yet, but will + # later if we ever see a value that doesn't fit in 32 bits. + my $has_q = 0; + eval { $has_q = pack("Q", "1") ? 1 : 1; }; + if (!$has_q) { + $self->{perl_is_64bit} = 0; + } + read($self->{file}, $str, 8); + if (substr($str, 4, 4) eq chr(0)x4) { + # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. + $self->{unpack_code} = 'V'; # Little-endian. + } elsif (substr($str, 0, 4) eq chr(0)x4) { + $self->{unpack_code} = 'N'; # Big-endian + } else { + ::error("$fname: header size >= 2**32\n"); + } + my @pair = unpack($self->{unpack_code} . "*", $str); + # Since we know one of the pair is 0, it's fine to just add them. + @$slots = (0, $pair[0] + $pair[1]); + } + return $self; + } + + # Load more data when we access slots->get(X) which is not yet in memory. + sub overflow { + my ($self) = @_; + my $slots = $self->{slots}; + $self->{base} += $#$slots + 1; # skip over data we're replacing + my $str; + read($self->{file}, $str, $self->{stride}); + if ($address_length == 8) { # the 32-bit case + # This is the easy case: unpack provides 32-bit unpacking primitives. + @$slots = unpack($self->{unpack_code} . "*", $str); + } else { + # We need to unpack 32 bits at a time and combine. + my @b32_values = unpack($self->{unpack_code} . "*", $str); + my @b64_values = (); + for (my $i = 0; $i < $#b32_values; $i += 2) { + # TODO(csilvers): if this is a 32-bit perl, the math below + # could end up in a too-large int, which perl will promote + # to a double, losing necessary precision. Deal with that. + # Right now, we just die. + my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); + if ($self->{unpack_code} eq 'N') { # big-endian + ($lo, $hi) = ($hi, $lo); + } + my $value = $lo + $hi * (2**32); + if (!$self->{perl_is_64bit} && # check value is exactly represented + (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { + ::error("Need a 64-bit perl to process this 64-bit profile.\n"); + } + push(@b64_values, $value); + } + @$slots = @b64_values; + } + } + + # Access the i-th long in the file (logically), or -1 at EOF. + sub get { + my ($self, $idx) = @_; + my $slots = $self->{slots}; + while ($#$slots >= 0) { + if ($idx < $self->{base}) { + # The only time we expect a reference to $slots[$i - something] + # after referencing $slots[$i] is reading the very first header. + # Since $stride > |header|, that shouldn't cause any lookback + # errors. And everything after the header is sequential. + print STDERR "Unexpected look-back reading CPU profile"; + return -1; # shrug, don't know what better to return + } elsif ($idx > $self->{base} + $#$slots) { + $self->overflow(); + } else { + return $slots->[$idx - $self->{base}]; + } + } + # If we get here, $slots is [], which means we've reached EOF + return -1; # unique since slots is supposed to hold unsigned numbers + } +} + +# Reads the top, 'header' section of a profile, and returns the last +# line of the header, commonly called a 'header line'. 
The header +# section of a profile consists of zero or more 'command' lines that +# are instructions to jeprof, which jeprof executes when reading the +# header. All 'command' lines start with a %. After the command +# lines is the 'header line', which is a profile-specific line that +# indicates what type of profile it is, and perhaps other global +# information about the profile. For instance, here's a header line +# for a heap profile: +# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile +# For historical reasons, the CPU profile does not contain a text- +# readable header line. If the profile looks like a CPU profile, +# this function returns "". If no header line could be found, this +# function returns undef. +# +# The following commands are recognized: +# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:' +# +# The input file should be in binmode. +sub ReadProfileHeader { + local *PROFILE = shift; + my $firstchar = ""; + my $line = ""; + read(PROFILE, $firstchar, 1); + seek(PROFILE, -1, 1); # unread the firstchar + if ($firstchar !~ /[[:print:]]/) { # is not a text character + return ""; + } + while (defined($line = )) { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + if ($line =~ /^%warn\s+(.*)/) { # 'warn' command + # Note this matches both '%warn blah\n' and '%warn\n'. + print STDERR "WARNING: $1\n"; # print the rest of the line + } elsif ($line =~ /^%/) { + print STDERR "Ignoring unknown command from profile header: $line"; + } else { + # End of commands, must be the header line. + return $line; + } + } + return undef; # got to EOF without seeing a header line +} + +sub IsSymbolizedProfileFile { + my $file_name = shift; + if (!(-e $file_name) || !(-r $file_name)) { + return 0; + } + # Check if the file contains a symbol-section marker. + open(TFILE, "<$file_name"); + binmode TFILE; + my $firstline = ReadProfileHeader(*TFILE); + close(TFILE); + if (!$firstline) { + return 0; + } + $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $symbol_marker = $&; + return $firstline =~ /^--- *$symbol_marker/; +} + +# Parse profile generated by common/profiler.cc and return a reference +# to a map: +# $result->{version} Version number of profile file +# $result->{period} Sampling period (in microseconds) +# $result->{profile} Profile object +# $result->{threads} Map of thread IDs to profile objects +# $result->{map} Memory map info from profile +# $result->{pcs} Hash of all PC values seen, key is hex address +sub ReadProfile { + my $prog = shift; + my $fname = shift; + my $result; # return value + + $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $contention_marker = $&; + $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $growth_marker = $&; + $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $symbol_marker = $&; + $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $profile_marker = $&; + $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $heap_marker = $&; + + # Look at first line to see if it is a heap or a CPU profile. + # CPU profile may start with no header at all, and just binary data + # (starting with \0\0\0\0) -- in that case, don't try to read the + # whole firstline, since it may be gigabytes(!) of data. 
+ open(PROFILE, "<$fname") || error("$fname: $!\n"); + binmode PROFILE; # New perls do UTF-8 processing + my $header = ReadProfileHeader(*PROFILE); + if (!defined($header)) { # means "at EOF" + error("Profile is empty.\n"); + } + + my $symbols; + if ($header =~ m/^--- *$symbol_marker/o) { + # Verify that the user asked for a symbolized profile + if (!$main::use_symbolized_profile) { + # we have both a binary and symbolized profiles, abort + error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " . + "a binary arg. Try again without passing\n $prog\n"); + } + # Read the symbol section of the symbolized profile file. + $symbols = ReadSymbols(*PROFILE{IO}); + # Read the next line to get the header for the remaining profile. + $header = ReadProfileHeader(*PROFILE) || ""; + } + + if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) { + # Skip "--- ..." line for profile types that have their own headers. + $header = ReadProfileHeader(*PROFILE) || ""; + } + + $main::profile_type = ''; + + if ($header =~ m/^heap profile:.*$growth_marker/o) { + $main::profile_type = 'growth'; + $result = ReadHeapProfile($prog, *PROFILE, $header); + } elsif ($header =~ m/^heap profile:/) { + $main::profile_type = 'heap'; + $result = ReadHeapProfile($prog, *PROFILE, $header); + } elsif ($header =~ m/^heap/) { + $main::profile_type = 'heap'; + $result = ReadThreadedHeapProfile($prog, $fname, $header); + } elsif ($header =~ m/^--- *$contention_marker/o) { + $main::profile_type = 'contention'; + $result = ReadSynchProfile($prog, *PROFILE); + } elsif ($header =~ m/^--- *Stacks:/) { + print STDERR + "Old format contention profile: mistakenly reports " . + "condition variable signals as lock contentions.\n"; + $main::profile_type = 'contention'; + $result = ReadSynchProfile($prog, *PROFILE); + } elsif ($header =~ m/^--- *$profile_marker/) { + # the binary cpu profile data starts immediately after this line + $main::profile_type = 'cpu'; + $result = ReadCPUProfile($prog, $fname, *PROFILE); + } else { + if (defined($symbols)) { + # a symbolized profile contains a format we don't recognize, bail out + error("$fname: Cannot recognize profile section after symbols.\n"); + } + # no ascii header present -- must be a CPU profile + $main::profile_type = 'cpu'; + $result = ReadCPUProfile($prog, $fname, *PROFILE); + } + + close(PROFILE); + + # if we got symbols along with the profile, return those as well + if (defined($symbols)) { + $result->{symbols} = $symbols; + } + + return $result; +} + +# Subtract one from caller pc so we map back to call instr. +# However, don't do this if we're reading a symbolized profile +# file, in which case the subtract-one was done when the file +# was written. +# +# We apply the same logic to all readers, though ReadCPUProfile uses an +# independent implementation. +sub FixCallerAddresses { + my $stack = shift; + # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile() + # dumps unadjusted profiles. + { + $stack =~ /(\s)/; + my $delimiter = $1; + my @addrs = split(' ', $stack); + my @fixedaddrs; + $#fixedaddrs = $#addrs; + if ($#addrs >= 0) { + $fixedaddrs[0] = $addrs[0]; + } + for (my $i = 1; $i <= $#addrs; $i++) { + $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); + } + return join $delimiter, @fixedaddrs; + } +} + +# CPU profile reader +sub ReadCPUProfile { + my $prog = shift; + my $fname = shift; # just used for logging + local *PROFILE = shift; + my $version; + my $period; + my $i; + my $profile = {}; + my $pcs = {}; + + # Parse string into array of slots. 
+ my $slots = CpuProfileStream->new(*PROFILE, $fname); + + # Read header. The current header version is a 5-element structure + # containing: + # 0: header count (always 0) + # 1: header "words" (after this one: 3) + # 2: format version (0) + # 3: sampling period (usec) + # 4: unused padding (always 0) + if ($slots->get(0) != 0 ) { + error("$fname: not a profile file, or old format profile file\n"); + } + $i = 2 + $slots->get(1); + $version = $slots->get(2); + $period = $slots->get(3); + # Do some sanity checking on these header values. + if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { + error("$fname: not a profile file, or corrupted profile file\n"); + } + + # Parse profile + while ($slots->get($i) != -1) { + my $n = $slots->get($i++); + my $d = $slots->get($i++); + if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? + my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8)); + print STDERR "At index $i (address $addr):\n"; + error("$fname: stack trace depth >= 2**32\n"); + } + if ($slots->get($i) == 0) { + # End of profile data marker + $i += $d; + last; + } + + # Make key out of the stack entries + my @k = (); + for (my $j = 0; $j < $d; $j++) { + my $pc = $slots->get($i+$j); + # Subtract one from caller pc so we map back to call instr. + $pc--; + $pc = sprintf("%0*x", $address_length, $pc); + $pcs->{$pc} = 1; + push @k, $pc; + } + + AddEntry($profile, (join "\n", @k), $n); + $i += $d; + } + + # Parse map + my $map = ''; + seek(PROFILE, $i * 4, 0); + read(PROFILE, $map, (stat PROFILE)[7]); + + my $r = {}; + $r->{version} = $version; + $r->{period} = $period; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + + return $r; +} + +sub HeapProfileIndex { + my $index = 1; + if ($main::opt_inuse_space) { + $index = 1; + } elsif ($main::opt_inuse_objects) { + $index = 0; + } elsif ($main::opt_alloc_space) { + $index = 3; + } elsif ($main::opt_alloc_objects) { + $index = 2; + } + return $index; +} + +sub ReadMappedLibraries { + my $fh = shift; + my $map = ""; + # Read the /proc/self/maps data + while (<$fh>) { + s/\r//g; # turn windows-looking lines into unix-looking lines + $map .= $_; + } + return $map; +} + +sub ReadMemoryMap { + my $fh = shift; + my $map = ""; + # Read /proc/self/maps data as formatted by DumpAddressMap() + my $buildvar = ""; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Parse "build=" specification if supplied + if (m/^\s*build=(.*)\n/) { + $buildvar = $1; + } + + # Expand "$build" variable if available + $_ =~ s/\$build\b/$buildvar/g; + + $map .= $_; + } + return $map; +} + +sub AdjustSamples { + my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_; + if ($sample_adjustment) { + if ($sampling_algorithm == 2) { + # Remote-heap version 2 + # The sampling frequency is the rate of a Poisson process. 
+ # This means that the probability of sampling an allocation of + # size X with sampling rate Y is 1 - exp(-X/Y) + if ($n1 != 0) { + my $ratio = (($s1*1.0)/$n1)/($sample_adjustment); + my $scale_factor = 1/(1 - exp(-$ratio)); + $n1 *= $scale_factor; + $s1 *= $scale_factor; + } + if ($n2 != 0) { + my $ratio = (($s2*1.0)/$n2)/($sample_adjustment); + my $scale_factor = 1/(1 - exp(-$ratio)); + $n2 *= $scale_factor; + $s2 *= $scale_factor; + } + } else { + # Remote-heap version 1 + my $ratio; + $ratio = (($s1*1.0)/$n1)/($sample_adjustment); + if ($ratio < 1) { + $n1 /= $ratio; + $s1 /= $ratio; + } + $ratio = (($s2*1.0)/$n2)/($sample_adjustment); + if ($ratio < 1) { + $n2 /= $ratio; + $s2 /= $ratio; + } + } + } + return ($n1, $s1, $n2, $s2); +} + +sub ReadHeapProfile { + my $prog = shift; + local *PROFILE = shift; + my $header = shift; + + my $index = HeapProfileIndex(); + + # Find the type of this profile. The header line looks like: + # heap profile: 1246: 8800744 [ 1246: 8800744] @ /266053 + # There are two pairs , the first inuse objects/space, and the + # second allocated objects/space. This is followed optionally by a profile + # type, and if that is present, optionally by a sampling frequency. + # For remote heap profiles (v1): + # The interpretation of the sampling frequency is that the profiler, for + # each sample, calculates a uniformly distributed random integer less than + # the given value, and records the next sample after that many bytes have + # been allocated. Therefore, the expected sample interval is half of the + # given frequency. By default, if not specified, the expected sample + # interval is 128KB. Only remote-heap-page profiles are adjusted for + # sample size. + # For remote heap profiles (v2): + # The sampling frequency is the rate of a Poisson process. This means that + # the probability of sampling an allocation of size X with sampling rate Y + # is 1 - exp(-X/Y) + # For version 2, a typical header line might look like this: + # heap profile: 1922: 127792360 [ 1922: 127792360] @ _v2/524288 + # the trailing number (524288) is the sampling rate. (Version 1 showed + # double the 'rate' here) + my $sampling_algorithm = 0; + my $sample_adjustment = 0; + chomp($header); + my $type = "unknown"; + if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") { + if (defined($6) && ($6 ne '')) { + $type = $6; + my $sample_period = $8; + # $type is "heapprofile" for profiles generated by the + # heap-profiler, and either "heap" or "heap_v2" for profiles + # generated by sampling directly within tcmalloc. It can also + # be "growth" for heap-growth profiles. The first is typically + # found for profiles generated locally, and the others for + # remote profiles. + if (($type eq "heapprofile") || ($type !~ /heap/) ) { + # No need to adjust for the sampling rate with heap-profiler-derived data + $sampling_algorithm = 0; + } elsif ($type =~ /_v2/) { + $sampling_algorithm = 2; # version 2 sampling + if (defined($sample_period) && ($sample_period ne '')) { + $sample_adjustment = int($sample_period); + } + } else { + $sampling_algorithm = 1; # version 1 sampling + if (defined($sample_period) && ($sample_period ne '')) { + $sample_adjustment = int($sample_period)/2; + } + } + } else { + # We detect whether or not this is a remote-heap profile by checking + # that the total-allocated stats ($n2,$s2) are exactly the + # same as the in-use stats ($n1,$s1). 
It is remotely conceivable + # that a non-remote-heap profile may pass this check, but it is hard + # to imagine how that could happen. + # In this case it's so old it's guaranteed to be remote-heap version 1. + my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); + if (($n1 == $n2) && ($s1 == $s2)) { + # This is likely to be a remote-heap based sample profile + $sampling_algorithm = 1; + } + } + } + + if ($sampling_algorithm > 0) { + # For remote-heap generated profiles, adjust the counts and sizes to + # account for the sample rate (we sample once every 128KB by default). + if ($sample_adjustment == 0) { + # Turn on profile adjustment. + $sample_adjustment = 128*1024; + print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n"; + } else { + printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n", + $sample_adjustment); + } + if ($sampling_algorithm > 1) { + # We don't bother printing anything for the original version (version 1) + printf STDERR "Heap version $sampling_algorithm\n"; + } + } + + my $profile = {}; + my $pcs = {}; + my $map = ""; + + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (/^MAPPED_LIBRARIES:/) { + $map .= ReadMappedLibraries(*PROFILE); + last; + } + + if (/^--- Memory map:/) { + $map .= ReadMemoryMap(*PROFILE); + last; + } + + # Read entry of the form: + # : [: ] @ a1 a2 a3 ... an + s/^\s*//; + s/\s*$//; + if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) { + my $stack = $5; + my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); + my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, + $n1, $s1, $n2, $s2); + AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); + } + } + + my $r = {}; + $r->{version} = "heap"; + $r->{period} = 1; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +sub ReadThreadedHeapProfile { + my ($prog, $fname, $header) = @_; + + my $index = HeapProfileIndex(); + my $sampling_algorithm = 0; + my $sample_adjustment = 0; + chomp($header); + my $type = "unknown"; + # Assuming a very specific type of header for now. + if ($header =~ m"^heap_v2/(\d+)") { + $type = "_v2"; + $sampling_algorithm = 2; + $sample_adjustment = int($1); + } + if ($type ne "_v2" || !defined($sample_adjustment)) { + die "Threaded heap profiles require v2 sampling with a sample rate\n"; + } + + my $profile = {}; + my $thread_profiles = {}; + my $pcs = {}; + my $map = ""; + my $stack = ""; + + while () { + s/\r//g; + if (/^MAPPED_LIBRARIES:/) { + $map .= ReadMappedLibraries(*PROFILE); + last; + } + + if (/^--- Memory map:/) { + $map .= ReadMemoryMap(*PROFILE); + last; + } + + # Read entry of the form: + # @ a1 a2 ... an + # t*: : [: ] + # t1: : [: ] + # ... + # tn: : [: ] + s/^\s*//; + s/\s*$//; + if (m/^@\s+(.*)$/) { + $stack = $1; + } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) { + if ($stack eq "") { + # Still in the header, so this is just a per-thread summary. 
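+        # (Such header lines look like "t1: <count>: <bytes> [<count>: <bytes>]";
+        # this is an illustrative reading of the regex above -- they are skipped
+        # until an "@ addr1 addr2 ..." stack line has been seen.)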
+ next; + } + my $thread = $2; + my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6); + my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, + $n1, $s1, $n2, $s2); + if ($thread eq "*") { + AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); + } else { + if (!exists($thread_profiles->{$thread})) { + $thread_profiles->{$thread} = {}; + } + AddEntries($thread_profiles->{$thread}, $pcs, + FixCallerAddresses($stack), $counts[$index]); + } + } + } + + my $r = {}; + $r->{version} = "heap"; + $r->{period} = 1; + $r->{profile} = $profile; + $r->{threads} = $thread_profiles; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +sub ReadSynchProfile { + my $prog = shift; + local *PROFILE = shift; + my $header = shift; + + my $map = ''; + my $profile = {}; + my $pcs = {}; + my $sampling_period = 1; + my $cyclespernanosec = 2.8; # Default assumption for old binaries + my $seen_clockrate = 0; + my $line; + + my $index = 0; + if ($main::opt_total_delay) { + $index = 0; + } elsif ($main::opt_contentions) { + $index = 1; + } elsif ($main::opt_mean_delay) { + $index = 2; + } + + while ( $line = ) { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) { + my ($cycles, $count, $stack) = ($1, $2, $3); + + # Convert cycles to nanoseconds + $cycles /= $cyclespernanosec; + + # Adjust for sampling done by application + $cycles *= $sampling_period; + $count *= $sampling_period; + + my @values = ($cycles, $count, $cycles / $count); + AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]); + + } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ || + $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) { + my ($cycles, $stack) = ($1, $2); + if ($cycles !~ /^\d+$/) { + next; + } + + # Convert cycles to nanoseconds + $cycles /= $cyclespernanosec; + + # Adjust for sampling done by application + $cycles *= $sampling_period; + + AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles); + + } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) { + my ($variable, $value) = ($1,$2); + for ($variable, $value) { + s/^\s+//; + s/\s+$//; + } + if ($variable eq "cycles/second") { + $cyclespernanosec = $value / 1e9; + $seen_clockrate = 1; + } elsif ($variable eq "sampling period") { + $sampling_period = $value; + } elsif ($variable eq "ms since reset") { + # Currently nothing is done with this value in jeprof + # So we just silently ignore it for now + } elsif ($variable eq "discarded samples") { + # Currently nothing is done with this value in jeprof + # So we just silently ignore it for now + } else { + printf STDERR ("Ignoring unnknown variable in /contention output: " . + "'%s' = '%s'\n",$variable,$value); + } + } else { + # Memory map entry + $map .= $line; + } + } + + if (!$seen_clockrate) { + printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n", + $cyclespernanosec); + } + + my $r = {}; + $r->{version} = 0; + $r->{period} = $sampling_period; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +# Given a hex value in the form "0x1abcd" or "1abcd", return either +# "0001abcd" or "000000000001abcd", depending on the current (global) +# address length. 
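+# For example (illustrative, for the two address lengths mentioned above):
+#   $address_length == 8:   HexExtend("0x1abcd") => "0001abcd"
+#   $address_length == 16:  HexExtend("1abcd")   => "000000000001abcd"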
+sub HexExtend { + my $addr = shift; + + $addr =~ s/^(0x)?0*//; + my $zeros_needed = $address_length - length($addr); + if ($zeros_needed < 0) { + printf STDERR "Warning: address $addr is longer than address length $address_length\n"; + return $addr; + } + return ("0" x $zeros_needed) . $addr; +} + +##### Symbol extraction ##### + +# Aggressively search the lib_prefix values for the given library +# If all else fails, just return the name of the library unmodified. +# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so" +# it will search the following locations in this order, until it finds a file: +# /my/path/lib/dir/mylib.so +# /other/path/lib/dir/mylib.so +# /my/path/dir/mylib.so +# /other/path/dir/mylib.so +# /my/path/mylib.so +# /other/path/mylib.so +# /lib/dir/mylib.so (returned as last resort) +sub FindLibrary { + my $file = shift; + my $suffix = $file; + + # Search for the library as described above + do { + foreach my $prefix (@prefix_list) { + my $fullpath = $prefix . $suffix; + if (-e $fullpath) { + return $fullpath; + } + } + } while ($suffix =~ s|^/[^/]+/|/|); + return $file; +} + +# Return path to library with debugging symbols. +# For libc libraries, the copy in /usr/lib/debug contains debugging symbols +sub DebuggingLibrary { + my $file = shift; + if ($file =~ m|^/|) { + if (-f "/usr/lib/debug$file") { + return "/usr/lib/debug$file"; + } elsif (-f "/usr/lib/debug$file.debug") { + return "/usr/lib/debug$file.debug"; + } + } + return undef; +} + +# Parse text section header of a library using objdump +sub ParseTextSectionHeaderFromObjdump { + my $lib = shift; + + my $size = undef; + my $vma; + my $file_offset; + # Get objdump output from the library file to figure out how to + # map between mapped addresses and addresses in the library. + my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib); + open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Idx Name Size VMA LMA File off Algn + # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4 + # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file + # offset may still be 8. But AddressSub below will still handle that. + my @x = split; + if (($#x >= 6) && ($x[1] eq '.text')) { + $size = $x[2]; + $vma = $x[3]; + $file_offset = $x[5]; + last; + } + } + close(OBJDUMP); + + if (!defined($size)) { + return undef; + } + + my $r = {}; + $r->{size} = $size; + $r->{vma} = $vma; + $r->{file_offset} = $file_offset; + + return $r; +} + +# Parse text section header of a library using otool (on OS X) +sub ParseTextSectionHeaderFromOtool { + my $lib = shift; + + my $size = undef; + my $vma = undef; + my $file_offset = undef; + # Get otool output from the library file to figure out how to + # map between mapped addresses and addresses in the library. + my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib); + open(OTOOL, "$command |") || error("$command: $!\n"); + my $cmd = ""; + my $sectname = ""; + my $segname = ""; + foreach my $line () { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + # Load command <#> + # cmd LC_SEGMENT + # [...] + # Section + # sectname __text + # segname __TEXT + # addr 0x000009f8 + # size 0x00018b9e + # offset 2552 + # align 2^2 (4) + # We will need to strip off the leading 0x from the hex addresses, + # and convert the offset into hex. 
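+  # For the sample section shown above, that works out to (illustrative only):
+  #   $vma         = "000009f8"   (captured with the leading "0x" stripped)
+  #   $size        = "00018b9e"
+  #   $file_offset = sprintf("%016x", 2552) = "00000000000009f8"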
+ if ($line =~ /Load command/) { + $cmd = ""; + $sectname = ""; + $segname = ""; + } elsif ($line =~ /Section/) { + $sectname = ""; + $segname = ""; + } elsif ($line =~ /cmd (\w+)/) { + $cmd = $1; + } elsif ($line =~ /sectname (\w+)/) { + $sectname = $1; + } elsif ($line =~ /segname (\w+)/) { + $segname = $1; + } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") && + $sectname eq "__text" && + $segname eq "__TEXT")) { + next; + } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) { + $vma = $1; + } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) { + $size = $1; + } elsif ($line =~ /\boffset ([0-9]+)/) { + $file_offset = sprintf("%016x", $1); + } + if (defined($vma) && defined($size) && defined($file_offset)) { + last; + } + } + close(OTOOL); + + if (!defined($vma) || !defined($size) || !defined($file_offset)) { + return undef; + } + + my $r = {}; + $r->{size} = $size; + $r->{vma} = $vma; + $r->{file_offset} = $file_offset; + + return $r; +} + +sub ParseTextSectionHeader { + # obj_tool_map("otool") is only defined if we're in a Mach-O environment + if (defined($obj_tool_map{"otool"})) { + my $r = ParseTextSectionHeaderFromOtool(@_); + if (defined($r)){ + return $r; + } + } + # If otool doesn't work, or we don't have it, fall back to objdump + return ParseTextSectionHeaderFromObjdump(@_); +} + +# Split /proc/pid/maps dump into a list of libraries +sub ParseLibraries { + return if $main::use_symbol_page; # We don't need libraries info. + my $prog = Cwd::abs_path(shift); + my $map = shift; + my $pcs = shift; + + my $result = []; + my $h = "[a-f0-9]+"; + my $zero_offset = HexExtend("0"); + + my $buildvar = ""; + foreach my $l (split("\n", $map)) { + if ($l =~ m/^\s*build=(.*)$/) { + $buildvar = $1; + } + + my $start; + my $finish; + my $offset; + my $lib; + if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) { + # Full line from /proc/self/maps. Example: + # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = HexExtend($3); + $lib = $4; + $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths + } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) { + # Cooked line from DumpAddressMap. Example: + # 40000000-40015000: /lib/ld-2.3.2.so + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = $zero_offset; + $lib = $3; + } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) { + # PIEs and address space randomization do not play well with our + # default assumption that main executable is at lowest + # addresses. So we're detecting main executable in + # /proc/self/maps as well. 
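+      # Hypothetical example of a line taken by this branch (addresses, device,
+      # inode and path are made up):
+      #   555555554000-555555558000 r-xp 00000000 08:01 123456 /usr/local/bin/myprog
+      # It only matches because the mapped path ($4) equals $prog.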
+ $start = HexExtend($1); + $finish = HexExtend($2); + $offset = HexExtend($3); + $lib = $4; + $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths + } + # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in + # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) + # + # Example: + # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s + # o.1 NCH -1 + elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) { + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = $zero_offset; + $lib = FindLibrary($5); + + } else { + next; + } + + # Expand "$build" variable if available + $lib =~ s/\$build\b/$buildvar/g; + + $lib = FindLibrary($lib); + + # Check for pre-relocated libraries, which use pre-relocated symbol tables + # and thus require adjusting the offset that we'll use to translate + # VM addresses into symbol table addresses. + # Only do this if we're not going to fetch the symbol table from a + # debugging copy of the library. + if (!DebuggingLibrary($lib)) { + my $text = ParseTextSectionHeader($lib); + if (defined($text)) { + my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); + $offset = AddressAdd($offset, $vma_offset); + } + } + + if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; } + push(@{$result}, [$lib, $start, $finish, $offset]); + } + + # Append special entry for additional library (not relocated) + if ($main::opt_lib ne "") { + my $text = ParseTextSectionHeader($main::opt_lib); + if (defined($text)) { + my $start = $text->{vma}; + my $finish = AddressAdd($start, $text->{size}); + + push(@{$result}, [$main::opt_lib, $start, $finish, $start]); + } + } + + # Append special entry for the main program. This covers + # 0..max_pc_value_seen, so that we assume pc values not found in one + # of the library ranges will be treated as coming from the main + # program binary. + my $min_pc = HexExtend("0"); + my $max_pc = $min_pc; # find the maximal PC value in any sample + foreach my $pc (keys(%{$pcs})) { + if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } + } + push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); + + return $result; +} + +# Add two hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressAdd { + my $addr1 = shift; + my $addr2 = shift; + my $sum; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); + return sprintf("%08x", $sum); + + } else { + # Do the addition in 7-nibble chunks to trivialize carry handling. + + if ($main::opt_debug and $main::opt_test) { + print STDERR "AddressAdd $addr1 + $addr2 = "; + } + + my $a1 = substr($addr1,-7); + $addr1 = substr($addr1,0,-7); + my $a2 = substr($addr2,-7); + $addr2 = substr($addr2,0,-7); + $sum = hex($a1) + hex($a2); + my $c = 0; + if ($sum > 0xfffffff) { + $c = 1; + $sum -= 0x10000000; + } + my $r = sprintf("%07x", $sum); + + $a1 = substr($addr1,-7); + $addr1 = substr($addr1,0,-7); + $a2 = substr($addr2,-7); + $addr2 = substr($addr2,0,-7); + $sum = hex($a1) + hex($a2) + $c; + $c = 0; + if ($sum > 0xfffffff) { + $c = 1; + $sum -= 0x10000000; + } + $r = sprintf("%07x", $sum) . $r; + + $sum = hex($addr1) + hex($addr2) + $c; + if ($sum > 0xff) { $sum -= 0x100; } + $r = sprintf("%02x", $sum) . 
$r; + + if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } + + return $r; + } +} + + +# Subtract two hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressSub { + my $addr1 = shift; + my $addr2 = shift; + my $diff; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); + return sprintf("%08x", $diff); + + } else { + # Do the addition in 7-nibble chunks to trivialize borrow handling. + # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } + + my $a1 = hex(substr($addr1,-7)); + $addr1 = substr($addr1,0,-7); + my $a2 = hex(substr($addr2,-7)); + $addr2 = substr($addr2,0,-7); + my $b = 0; + if ($a2 > $a1) { + $b = 1; + $a1 += 0x10000000; + } + $diff = $a1 - $a2; + my $r = sprintf("%07x", $diff); + + $a1 = hex(substr($addr1,-7)); + $addr1 = substr($addr1,0,-7); + $a2 = hex(substr($addr2,-7)) + $b; + $addr2 = substr($addr2,0,-7); + $b = 0; + if ($a2 > $a1) { + $b = 1; + $a1 += 0x10000000; + } + $diff = $a1 - $a2; + $r = sprintf("%07x", $diff) . $r; + + $a1 = hex($addr1); + $a2 = hex($addr2) + $b; + if ($a2 > $a1) { $a1 += 0x100; } + $diff = $a1 - $a2; + $r = sprintf("%02x", $diff) . $r; + + # if ($main::opt_debug) { print STDERR "$r\n"; } + + return $r; + } +} + +# Increment a hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressInc { + my $addr = shift; + my $sum; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $sum = (hex($addr)+1) % (0x10000000 * 16); + return sprintf("%08x", $sum); + + } else { + # Do the addition in 7-nibble chunks to trivialize carry handling. + # We are always doing this to step through the addresses in a function, + # and will almost never overflow the first chunk, so we check for this + # case and exit early. + + # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } + + my $a1 = substr($addr,-7); + $addr = substr($addr,0,-7); + $sum = hex($a1) + 1; + my $r = sprintf("%07x", $sum); + if ($sum <= 0xfffffff) { + $r = $addr . $r; + # if ($main::opt_debug) { print STDERR "$r\n"; } + return HexExtend($r); + } else { + $r = "0000000"; + } + + $a1 = substr($addr,-7); + $addr = substr($addr,0,-7); + $sum = hex($a1) + 1; + $r = sprintf("%07x", $sum) . $r; + if ($sum <= 0xfffffff) { + $r = $addr . $r; + # if ($main::opt_debug) { print STDERR "$r\n"; } + return HexExtend($r); + } else { + $r = "00000000000000"; + } + + $sum = hex($addr) + 1; + if ($sum > 0xff) { $sum -= 0x100; } + $r = sprintf("%02x", $sum) . $r; + + # if ($main::opt_debug) { print STDERR "$r\n"; } + return $r; + } +} + +# Extract symbols for all PC values found in profile +sub ExtractSymbols { + my $libs = shift; + my $pcset = shift; + + my $symbols = {}; + + # Map each PC value to the containing library. To make this faster, + # we sort libraries by their starting pc value (highest first), and + # advance through the libraries as we advance the pc. Sometimes the + # addresses of libraries may overlap with the addresses of the main + # binary, so to make sure the libraries 'win', we iterate over the + # libraries in reverse order (which assumes the binary doesn't start + # in the middle of a library, which seems a fair assumption). 
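+  # Illustrative sketch of the per-library carve-out below (zero-extended,
+  # 8-digit PCs assumed): with @pcs = ("00001000", "00400010", "00400020") and
+  # a library spanning ["00400000", "00410000"], the two PCs inside the range
+  # are spliced into $contained and symbolized against that library, while
+  # "00001000" stays in @pcs for the catch-all main-program entry that
+  # ParseLibraries() appended.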
+ my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings + foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { + my $libname = $lib->[0]; + my $start = $lib->[1]; + my $finish = $lib->[2]; + my $offset = $lib->[3]; + + # Use debug library if it exists + my $debug_libname = DebuggingLibrary($libname); + if ($debug_libname) { + $libname = $debug_libname; + } + + # Get list of pcs that belong in this library. + my $contained = []; + my ($start_pc_index, $finish_pc_index); + # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. + for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; + $finish_pc_index--) { + last if $pcs[$finish_pc_index - 1] le $finish; + } + # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. + for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; + $start_pc_index--) { + last if $pcs[$start_pc_index - 1] lt $start; + } + # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, + # in case there are overlaps in libraries and the main binary. + @{$contained} = splice(@pcs, $start_pc_index, + $finish_pc_index - $start_pc_index); + # Map to symbols + MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); + } + + return $symbols; +} + +# Map list of PC values to symbols for a given image +sub MapToSymbols { + my $image = shift; + my $offset = shift; + my $pclist = shift; + my $symbols = shift; + + my $debug = 0; + + # Ignore empty binaries + if ($#{$pclist} < 0) { return; } + + # Figure out the addr2line command to use + my $addr2line = $obj_tool_map{"addr2line"}; + my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); + if (exists $obj_tool_map{"addr2line_pdb"}) { + $addr2line = $obj_tool_map{"addr2line_pdb"}; + $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); + } + + # If "addr2line" isn't installed on the system at all, just use + # nm to get what info we can (function names, but not line numbers). + if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { + MapSymbolsWithNM($image, $offset, $pclist, $symbols); + return; + } + + # "addr2line -i" can produce a variable number of lines per input + # address, with no separator that allows us to tell when data for + # the next address starts. So we find the address for a special + # symbol (_fini) and interleave this address between all real + # addresses passed to addr2line. The name of this special symbol + # can then be used as a separator. + $sep_address = undef; # May be filled in by MapSymbolsWithNM() + my $nm_symbols = {}; + MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); + if (defined($sep_address)) { + # Only add " -i" to addr2line if the binary supports it. + # addr2line --help returns 0, but not if it sees an unknown flag first. + if (system("$cmd -i --help >$dev_null 2>&1") == 0) { + $cmd .= " -i"; + } else { + $sep_address = undef; # no need for sep_address if we don't support -i + } + } + + # Make file with all PC values with intervening 'sep_address' so + # that we can reliably detect the end of inlined function list + open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); + if ($debug) { print("---- $image ---\n"); } + for (my $i = 0; $i <= $#{$pclist}; $i++) { + # addr2line always reads hex addresses, and does not need '0x' prefix. 
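+    # Illustrative temp-file contents for two PCs when $sep_address is known
+    # (addresses are hypothetical):
+    #   0000000000001a2b
+    #   0000000000000f10   <- $sep_address (_fini), emitted after every real PC
+    #   0000000000002c3d
+    #   0000000000000f10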
+ if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } + printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); + if (defined($sep_address)) { + printf ADDRESSES ("%s\n", $sep_address); + } + } + close(ADDRESSES); + if ($debug) { + print("----\n"); + system("cat", $main::tmpfile_sym); + print("----\n"); + system("$cmd < " . ShellEscape($main::tmpfile_sym)); + print("----\n"); + } + + open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |") + || error("$cmd: $!\n"); + my $count = 0; # Index in pclist + while () { + # Read fullfunction and filelineinfo from next pair of lines + s/\r?\n$//g; + my $fullfunction = $_; + $_ = ; + s/\r?\n$//g; + my $filelinenum = $_; + + if (defined($sep_address) && $fullfunction eq $sep_symbol) { + # Terminating marker for data for this address + $count++; + next; + } + + $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths + + my $pcstr = $pclist->[$count]; + my $function = ShortFunctionName($fullfunction); + my $nms = $nm_symbols->{$pcstr}; + if (defined($nms)) { + if ($fullfunction eq '??') { + # nm found a symbol for us. + $function = $nms->[0]; + $fullfunction = $nms->[2]; + } else { + # MapSymbolsWithNM tags each routine with its starting address, + # useful in case the image has multiple occurrences of this + # routine. (It uses a syntax that resembles template paramters, + # that are automatically stripped out by ShortFunctionName().) + # addr2line does not provide the same information. So we check + # if nm disambiguated our symbol, and if so take the annotated + # (nm) version of the routine-name. TODO(csilvers): this won't + # catch overloaded, inlined symbols, which nm doesn't see. + # Better would be to do a check similar to nm's, in this fn. + if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn + $function = $nms->[0]; + $fullfunction = $nms->[2]; + } + } + } + + # Prepend to accumulated symbols for pcstr + # (so that caller comes before callee) + my $sym = $symbols->{$pcstr}; + if (!defined($sym)) { + $sym = []; + $symbols->{$pcstr} = $sym; + } + unshift(@{$sym}, $function, $filelinenum, $fullfunction); + if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } + if (!defined($sep_address)) { + # Inlining is off, so this entry ends immediately + $count++; + } + } + close(SYMBOLS); +} + +# Use nm to map the list of referenced PCs to symbols. Return true iff we +# are able to read procedure information via nm. +sub MapSymbolsWithNM { + my $image = shift; + my $offset = shift; + my $pclist = shift; + my $symbols = shift; + + # Get nm output sorted by increasing address + my $symbol_table = GetProcedureBoundaries($image, "."); + if (!%{$symbol_table}) { + return 0; + } + # Start addresses are already the right length (8 or 16 hex digits). + my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } + keys(%{$symbol_table}); + + if ($#names < 0) { + # No symbols: just use addresses + foreach my $pc (@{$pclist}) { + my $pcstr = "0x" . 
$pc; + $symbols->{$pc} = [$pcstr, "?", $pcstr]; + } + return 0; + } + + # Sort addresses so we can do a join against nm output + my $index = 0; + my $fullname = $names[0]; + my $name = ShortFunctionName($fullname); + foreach my $pc (sort { $a cmp $b } @{$pclist}) { + # Adjust for mapped offset + my $mpc = AddressSub($pc, $offset); + while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ + $index++; + $fullname = $names[$index]; + $name = ShortFunctionName($fullname); + } + if ($mpc lt $symbol_table->{$fullname}->[1]) { + $symbols->{$pc} = [$name, "?", $fullname]; + } else { + my $pcstr = "0x" . $pc; + $symbols->{$pc} = [$pcstr, "?", $pcstr]; + } + } + return 1; +} + +sub ShortFunctionName { + my $function = shift; + while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types + while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments + $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type + return $function; +} + +# Trim overly long symbols found in disassembler output +sub CleanDisassembly { + my $d = shift; + while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) + while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments + return $d; +} + +# Clean file name for display +sub CleanFileName { + my ($f) = @_; + $f =~ s|^/proc/self/cwd/||; + $f =~ s|^\./||; + return $f; +} + +# Make address relative to section and clean up for display +sub UnparseAddress { + my ($offset, $address) = @_; + $address = AddressSub($address, $offset); + $address =~ s/^0x//; + $address =~ s/^0*//; + return $address; +} + +##### Miscellaneous ##### + +# Find the right versions of the above object tools to use. The +# argument is the program file being analyzed, and should be an ELF +# 32-bit or ELF 64-bit executable file. The location of the tools +# is determined by considering the following options in this order: +# 1) --tools option, if set +# 2) JEPROF_TOOLS environment variable, if set +# 3) the environment +sub ConfigureObjTools { + my $prog_file = shift; + + # Check for the existence of $prog_file because /usr/bin/file does not + # predictably return error status in prod. + (-e $prog_file) || error("$prog_file does not exist.\n"); + + my $file_type = undef; + if (-e "/usr/bin/file") { + # Follow symlinks (at least for systems where "file" supports that). + my $escaped_prog_file = ShellEscape($prog_file); + $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || + /usr/bin/file $escaped_prog_file`; + } elsif ($^O == "MSWin32") { + $file_type = "MS Windows"; + } else { + print STDERR "WARNING: Can't determine the file type of $prog_file"; + } + + if ($file_type =~ /64-bit/) { + # Change $address_length to 16 if the program file is ELF 64-bit. + # We can't detect this from many (most?) heap or lock contention + # profiles, since the actual addresses referenced are generally in low + # memory even for 64-bit programs. + $address_length = 16; + } + + if ($file_type =~ /MS Windows/) { + # For windows, we provide a version of nm and addr2line as part of + # the opensource release, which is capable of parsing + # Windows-style PDB executables. It should live in the path, or + # in the same directory as jeprof. + $obj_tool_map{"nm_pdb"} = "nm-pdb"; + $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; + } + + if ($file_type =~ /Mach-O/) { + # OS X uses otool to examine Mach-O files, rather than objdump. 
+ $obj_tool_map{"otool"} = "otool"; + $obj_tool_map{"addr2line"} = "false"; # no addr2line + $obj_tool_map{"objdump"} = "false"; # no objdump + } + + # Go fill in %obj_tool_map with the pathnames to use: + foreach my $tool (keys %obj_tool_map) { + $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); + } +} + +# Returns the path of a caller-specified object tool. If --tools or +# JEPROF_TOOLS are specified, then returns the full path to the tool +# with that prefix. Otherwise, returns the path unmodified (which +# means we will look for it on PATH). +sub ConfigureTool { + my $tool = shift; + my $path; + + # --tools (or $JEPROF_TOOLS) is a comma separated list, where each + # item is either a) a pathname prefix, or b) a map of the form + # :. First we look for an entry of type (b) for our + # tool. If one is found, we use it. Otherwise, we consider all the + # pathname prefixes in turn, until one yields an existing file. If + # none does, we use a default path. + my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || ""; + if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { + $path = $2; + # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. + } elsif ($tools ne '') { + foreach my $prefix (split(',', $tools)) { + next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list + if (-x $prefix . $tool) { + $path = $prefix . $tool; + last; + } + } + if (!$path) { + error("No '$tool' found with prefix specified by " . + "--tools (or \$JEPROF_TOOLS) '$tools'\n"); + } + } else { + # ... otherwise use the version that exists in the same directory as + # jeprof. If there's nothing there, use $PATH. + $0 =~ m,[^/]*$,; # this is everything after the last slash + my $dirname = $`; # this is everything up to and including the last slash + if (-x "$dirname$tool") { + $path = "$dirname$tool"; + } else { + $path = $tool; + } + } + if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } + return $path; +} + +sub ShellEscape { + my @escaped_words = (); + foreach my $word (@_) { + my $escaped_word = $word; + if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist + $escaped_word =~ s/'/'\\''/; + $escaped_word = "'$escaped_word'"; + } + push(@escaped_words, $escaped_word); + } + return join(" ", @escaped_words); +} + +sub cleanup { + unlink($main::tmpfile_sym); + unlink(keys %main::tempnames); + + # We leave any collected profiles in $HOME/jeprof in case the user wants + # to look at them later. We print a message informing them of this. 
+ if ((scalar(@main::profile_files) > 0) && + defined($main::collected_profile)) { + if (scalar(@main::profile_files) == 1) { + print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; + } + print STDERR "If you want to investigate this profile further, you can do:\n"; + print STDERR "\n"; + print STDERR " jeprof \\\n"; + print STDERR " $main::prog \\\n"; + print STDERR " $main::collected_profile\n"; + print STDERR "\n"; + } +} + +sub sighandler { + cleanup(); + exit(1); +} + +sub error { + my $msg = shift; + print STDERR $msg; + cleanup(); + exit(1); +} + + +# Run $nm_command and get all the resulting procedure boundaries whose +# names match "$regexp" and returns them in a hashtable mapping from +# procedure name to a two-element vector of [start address, end address] +sub GetProcedureBoundariesViaNm { + my $escaped_nm_command = shift; # shell-escaped + my $regexp = shift; + + my $symbol_table = {}; + open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); + my $last_start = "0"; + my $routine = ""; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (m/^\s*([0-9a-f]+) (.) (..*)/) { + my $start_val = $1; + my $type = $2; + my $this_routine = $3; + + # It's possible for two symbols to share the same address, if + # one is a zero-length variable (like __start_google_malloc) or + # one symbol is a weak alias to another (like __libc_malloc). + # In such cases, we want to ignore all values except for the + # actual symbol, which in nm-speak has type "T". The logic + # below does this, though it's a bit tricky: what happens when + # we have a series of lines with the same address, is the first + # one gets queued up to be processed. However, it won't + # *actually* be processed until later, when we read a line with + # a different address. That means that as long as we're reading + # lines with the same address, we have a chance to replace that + # item in the queue, which we do whenever we see a 'T' entry -- + # that is, a line with type 'T'. If we never see a 'T' entry, + # we'll just go ahead and process the first entry (which never + # got touched in the queue), and ignore the others. + if ($start_val eq $last_start && $type =~ /t/i) { + # We are the 'T' symbol at this address, replace previous symbol. + $routine = $this_routine; + next; + } elsif ($start_val eq $last_start) { + # We're not the 'T' symbol at this address, so ignore us. + next; + } + + if ($this_routine eq $sep_symbol) { + $sep_address = HexExtend($start_val); + } + + # Tag this routine with the starting address in case the image + # has multiple occurrences of this routine. We use a syntax + # that resembles template parameters that are automatically + # stripped out by ShortFunctionName() + $this_routine .= "<$start_val>"; + + if (defined($routine) && $routine =~ m/$regexp/) { + $symbol_table->{$routine} = [HexExtend($last_start), + HexExtend($start_val)]; + } + $last_start = $start_val; + $routine = $this_routine; + } elsif (m/^Loaded image name: (.+)/) { + # The win32 nm workalike emits information about the binary it is using. + if ($main::opt_debug) { print STDERR "Using Image $1\n"; } + } elsif (m/^PDB file name: (.+)/) { + # The win32 nm workalike emits information about the pdb it is using. + if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } + } + } + close(NM); + # Handle the last line in the nm output. Unfortunately, we don't know + # how big this last symbol is, because we don't know how big the file + # is. 
For now, we just give it a size of 0. + # TODO(csilvers): do better here. + if (defined($routine) && $routine =~ m/$regexp/) { + $symbol_table->{$routine} = [HexExtend($last_start), + HexExtend($last_start)]; + } + return $symbol_table; +} + +# Gets the procedure boundaries for all routines in "$image" whose names +# match "$regexp" and returns them in a hashtable mapping from procedure +# name to a two-element vector of [start address, end address]. +# Will return an empty map if nm is not installed or not working properly. +sub GetProcedureBoundaries { + my $image = shift; + my $regexp = shift; + + # If $image doesn't start with /, then put ./ in front of it. This works + # around an obnoxious bug in our probing of nm -f behavior. + # "nm -f $image" is supposed to fail on GNU nm, but if: + # + # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND + # b. you have a.out in your current directory (a not uncommon occurence) + # + # then "nm -f $image" succeeds because -f only looks at the first letter of + # the argument, which looks valid because it's [BbSsPp], and then since + # there's no image provided, it looks for a.out and finds it. + # + # This regex makes sure that $image starts with . or /, forcing the -f + # parsing to fail since . and / are not valid formats. + $image =~ s#^[^/]#./$&#; + + # For libc libraries, the copy in /usr/lib/debug contains debugging symbols + my $debugging = DebuggingLibrary($image); + if ($debugging) { + $image = $debugging; + } + + my $nm = $obj_tool_map{"nm"}; + my $cppfilt = $obj_tool_map{"c++filt"}; + + # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm + # binary doesn't support --demangle. In addition, for OS X we need + # to use the -f flag to get 'flat' nm output (otherwise we don't sort + # properly and get incorrect results). Unfortunately, GNU nm uses -f + # in an incompatible way. So first we test whether our nm supports + # --demangle and -f. + my $demangle_flag = ""; + my $cppfilt_flag = ""; + my $to_devnull = ">$dev_null 2>&1"; + if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) { + # In this mode, we do "nm --demangle " + $demangle_flag = "--demangle"; + $cppfilt_flag = ""; + } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { + # In this mode, we do "nm | c++filt" + $cppfilt_flag = " | " . ShellEscape($cppfilt); + }; + my $flatten_flag = ""; + if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { + $flatten_flag = "-f"; + } + + # Finally, in the case $imagie isn't a debug library, we try again with + # -D to at least get *exported* symbols. If we can't use --demangle, + # we use c++filt instead, if it exists on this system. + my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, + $image) . " 2>$dev_null $cppfilt_flag", + ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, + $image) . " 2>$dev_null $cppfilt_flag", + # 6nm is for Go binaries + ShellEscape("6nm", "$image") . " 2>$dev_null | sort", + ); + + # If the executable is an MS Windows PDB-format executable, we'll + # have set up obj_tool_map("nm_pdb"). In this case, we actually + # want to use both unix nm and windows-specific nm_pdb, since + # PDB-format executables can apparently include dwarf .o files. + if (exists $obj_tool_map{"nm_pdb"}) { + push(@nm_commands, + ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) + . 
" 2>$dev_null"); + } + + foreach my $nm_command (@nm_commands) { + my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); + return $symbol_table if (%{$symbol_table}); + } + my $symbol_table = {}; + return $symbol_table; +} + + +# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. +# To make them more readable, we add underscores at interesting places. +# This routine removes the underscores, producing the canonical representation +# used by jeprof to represent addresses, particularly in the tested routines. +sub CanonicalHex { + my $arg = shift; + return join '', (split '_',$arg); +} + + +# Unit test for AddressAdd: +sub AddressAddUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressAdd ($row->[0], $row->[1]); + if ($sum ne $row->[2]) { + printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, + $row->[0], $row->[1], $row->[2]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. + $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); + my $expected = join '', (split '_',$row->[2]); + if ($sum ne CanonicalHex($row->[2])) { + printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, + $row->[0], $row->[1], $row->[2]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Unit test for AddressSub: +sub AddressSubUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressSub ($row->[0], $row->[1]); + if ($sum ne $row->[3]) { + printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, + $row->[0], $row->[1], $row->[3]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. 
+ $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); + if ($sum ne CanonicalHex($row->[3])) { + printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, + $row->[0], $row->[1], $row->[3]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Unit test for AddressInc: +sub AddressIncUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressInc ($row->[0]); + if ($sum ne $row->[4]) { + printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, + $row->[0], $row->[4]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. + $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressInc (CanonicalHex($row->[0])); + if ($sum ne CanonicalHex($row->[4])) { + printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, + $row->[0], $row->[4]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Driver for unit tests. +# Currently just the address add/subtract/increment routines for 64-bit. +sub RunUnitTests { + my $error_count = 0; + + # This is a list of tuples [a, b, a+b, a-b, a+1] + my $unit_test_data_8 = [ + [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], + [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], + [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], + [qw(00000001 ffffffff 00000000 00000002 00000002)], + [qw(00000001 fffffff0 fffffff1 00000011 00000002)], + ]; + my $unit_test_data_16 = [ + # The implementation handles data in 7-nibble chunks, so those are the + # interesting boundaries. 
+ [qw(aaaaaaaa 50505050 + 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], + [qw(50505050 aaaaaaaa + 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], + [qw(ffffffff aaaaaaaa + 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], + [qw(00000001 ffffffff + 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], + [qw(00000001 fffffff0 + 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], + + [qw(00_a00000a_aaaaaaa 50505050 + 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], + [qw(0f_fff0005_0505050 aaaaaaaa + 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], + [qw(00_000000f_fffffff 01_800000a_aaaaaaa + 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], + [qw(00_0000000_0000001 ff_fffffff_fffffff + 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], + [qw(00_0000000_0000001 ff_fffffff_ffffff0 + ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], + ]; + + $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); + $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); + $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); + if ($error_count > 0) { + print STDERR $error_count, " errors: FAILED\n"; + } else { + print STDERR "PASS\n"; + } + exit ($error_count); +} diff --git a/deps/jemalloc/build-aux/config.guess b/deps/jemalloc/build-aux/config.guess new file mode 100755 index 0000000..2e9ad7f --- /dev/null +++ b/deps/jemalloc/build-aux/config.guess @@ -0,0 +1,1462 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-10-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. 
There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. 
This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include <stdio.h> /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n
's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include <sys/systemcfg.h> + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]??
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include <stdlib.h> + #include <unistd.h> + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include <unistd.h> + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however.
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) 
+ echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` + echo ${UNAME_MACHINE}-pc-isc$UNAME_REL + elif /bin/uname -X 2>/dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/deps/jemalloc/build-aux/config.sub b/deps/jemalloc/build-aux/config.sub new file mode 100755 index 0000000..dd2ca93 --- /dev/null +++ b/deps/jemalloc/build-aux/config.sub @@ -0,0 +1,1825 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-11-04' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. 
This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. 
We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) 
+ basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/deps/jemalloc/build-aux/install-sh b/deps/jemalloc/build-aux/install-sh new file mode 100755 index 0000000..ebc6691 --- /dev/null +++ b/deps/jemalloc/build-aux/install-sh @@ -0,0 +1,250 @@ +#! /bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). +# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. 
+ +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. 
+ + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/deps/jemalloc/config.stamp.in b/deps/jemalloc/config.stamp.in new file mode 100644 index 0000000..e69de29 diff --git a/deps/jemalloc/configure b/deps/jemalloc/configure new file mode 100755 index 0000000..dace1f6 --- /dev/null +++ b/deps/jemalloc/configure @@ -0,0 +1,14112 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69. +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." 
+ else + $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. 
+as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= +PACKAGE_URL= + +ac_unique_file="Makefile.in" +# Factoring default headers for most tests. +ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='LTLIBOBJS +LIBOBJS +cfgoutputs_out +cfgoutputs_in +cfghdrs_out +cfghdrs_in +enable_initial_exec_tls +enable_zone_allocator +enable_tls +enable_lazy_lock +libdl +enable_opt_safety_checks +enable_readlinkat +enable_log +enable_cache_oblivious +enable_xmalloc +enable_utrace +enable_fill +enable_prof +enable_experimental_smallocx +enable_stats +enable_debug +je_ +install_suffix +private_namespace +JEMALLOC_CPREFIX +JEMALLOC_PREFIX +enable_static +enable_shared +enable_doc +AUTOCONF +LD +RANLIB +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +enable_autogen +RPATH_EXTRA +LM +CC_MM +DUMP_SYMS +AROUT +ARFLAGS +MKLIB +TEST_LD_MODE +LDTARGET +CTARGET +PIC_CFLAGS +SOREV +EXTRA_LDFLAGS +DSO_LDFLAGS +link_whole_archive +libprefix +exe +a +o +importlib +so +LD_PRELOAD_VAR +RPATH +abi +jemalloc_version_gid +jemalloc_version_nrev +jemalloc_version_bugfix +jemalloc_version_minor +jemalloc_version_major +jemalloc_version +AWK +NM +AR +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +EGREP +GREP +EXTRA_CXXFLAGS +SPECIFIED_CXXFLAGS +CONFIGURE_CXXFLAGS +enable_cxx +HAVE_CXX14 +ac_ct_CXX +CXXFLAGS +CXX +CPP +EXTRA_CFLAGS +SPECIFIED_CFLAGS +CONFIGURE_CFLAGS +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +XSLROOT +XSLTPROC +MANDIR +DATADIR +LIBDIR +INCLUDEDIR +BINDIR +PREFIX +abs_objroot +objroot +abs_srcroot +srcroot +rev +CONFIG +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +runstatedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +with_xslroot +enable_cxx +with_lg_vaddr +with_version +with_rpath +enable_autogen +enable_doc +enable_shared +enable_static 
+with_mangling +with_jemalloc_prefix +with_export +with_private_namespace +with_install_suffix +with_malloc_conf +enable_debug +enable_stats +enable_experimental_smallocx +enable_prof +enable_prof_libunwind +with_static_libunwind +enable_prof_libgcc +enable_prof_gcc +enable_fill +enable_utrace +enable_xmalloc +enable_cache_oblivious +enable_log +enable_readlinkat +enable_opt_safety_checks +with_lg_quantum +with_lg_page +with_lg_hugepage +enable_libdl +enable_syscall +enable_lazy_lock +enable_zone_allocator +enable_initial_exec_tls +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CPP +CXX +CXXFLAGS +CCC' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/PACKAGE]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --disable-cxx           Disable C++ integration
+  --enable-autogen        Automatically regenerate configure output
+  --enable-documentation  Build documentation
+  --enable-shared         Build shared libaries
+  --enable-static         Build static libaries
+  --enable-debug          Build debugging code
+  --disable-stats         Disable statistics calculation/reporting
+  --enable-experimental-smallocx
+                          Enable experimental smallocx API
+  --enable-prof           Enable allocation profiling
+  --enable-prof-libunwind Use libunwind for backtracing
+  --disable-prof-libgcc   Do not use libgcc for backtracing
+  --disable-prof-gcc      Do not use gcc intrinsics for backtracing
+  --disable-fill          Disable support for junk/zero filling
+  --enable-utrace         Enable utrace(2)-based tracing
+  --enable-xmalloc        Support xmalloc option
+  --disable-cache-oblivious
+                          Disable support for cache-oblivious allocation
+                          alignment
+  --enable-log            Support debug logging
+  --enable-readlinkat     Use readlinkat over readlink
+  --enable-opt-safety-checks
+                          Perform certain low-overhead checks, even in opt
+                          mode
+  --disable-libdl         Do not use libdl
+  --disable-syscall       Disable use of syscall(2)
+  --enable-lazy-lock      Enable lazy locking (only lock when multi-threaded)
+  --disable-zone-allocator
+                          Disable zone allocator for Darwin
+  --disable-initial-exec-tls
+                          Disable the initial-exec tls model
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-xslroot=<path>   XSL stylesheet root path
+  --with-lg-vaddr=<lg-vaddr>
+                          Number of significant virtual address bits
+  --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
+                          Version string
+  --with-rpath=<rpath>    Colon-separated rpath (ELF systems only)
+  --with-mangling=<map>   Mangle symbols in <map>
+  --with-jemalloc-prefix=<prefix>
+                          Prefix to prepend to all public APIs
+  --without-export        disable exporting jemalloc public APIs
+  --with-private-namespace=<prefix>
+                          Prefix to prepend to all library-private APIs
+  --with-install-suffix=<suffix>
append to all installed files + --with-malloc-conf=<malloc_conf> + config.malloc_conf options string + --with-static-libunwind=<libunwind.a> + Path to static libunwind library; use rather than + dynamically linking + --with-lg-quantum=<lg-quantum> + Base 2 log of minimum allocation alignment + --with-lg-page=<lg-page> + Base 2 log of system page size + --with-lg-hugepage=<lg-hugepage> + Base 2 log of system huge page size + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l<library> + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if + you have headers in a nonstandard directory + CPP C preprocessor + CXX C++ compiler command + CXXFLAGS C++ compiler flags + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to the package provider. +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +configure +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES +# -------------------------------------------- +# Tries to find the compile-time value of EXPR in a program that includes +# INCLUDES, setting VAR accordingly. Returns whether the value could be +# computed +ac_fn_c_compute_int () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=0 ac_mid=0 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid; break +else + as_fn_arith $ac_mid + 1 && ac_lo=$as_val + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) < 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=-1 ac_mid=-1 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=$ac_mid; break +else + as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + ac_lo= ac_hi= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid +else + as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in #(( +?*) eval "$3=\$ac_lo"; ac_retval=0 ;; +'') ac_retval=1 ;; +esac + else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +static long int longval () { return $2; } +static unsigned long int ulongval () { return $2; } +#include <stdio.h> +#include <stdlib.h> +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! 
f) + return 1; + if (($2) < 0) + { + long int i = longval (); + if (i != ($2)) + return 1; + fprintf (f, "%ld", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ($2)) + return 1; + fprintf (f, "%lu", i); + } + /* Do not output a trailing newline, as this causes \r\n confusion + on some platforms. */ + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + echo >>conftest.val; read $3 <conftest.val; ac_retval=0 +else + ac_retval=1 +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi +rm -f conftest.val + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_compute_int + +# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_c_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... 
" >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case <limits.h> declares $2. + For example, HP-UX 11i <limits.h> declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + <limits.h> exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + +# ac_fn_c_check_type LINENO TYPE VAR INCLUDES +# ------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_c_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_type +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. 
+ +It was created by $as_me, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +ac_aux_dir= +for ac_dir in build-aux "$srcdir"/build-aux; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + + + + + + + +CONFIGURE_CFLAGS= +SPECIFIED_CFLAGS="${CFLAGS}" + + + + + +CONFIGURE_CXXFLAGS= +SPECIFIED_CXXFLAGS="${CXXFLAGS}" + + + + + +CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` + + +rev=2 + + +srcroot=$srcdir +if test "x${srcroot}" = "x." ; then + srcroot="" +else + srcroot="${srcroot}/" +fi + +abs_srcroot="`cd \"${srcdir}\"; pwd`/" + + +objroot="" + +abs_objroot="`pwd`/" + + +if test "x$prefix" = "xNONE" ; then + prefix="/usr/local" +fi +if test "x$exec_prefix" = "xNONE" ; then + exec_prefix=$prefix +fi +PREFIX=$prefix + +BINDIR=`eval echo $bindir` +BINDIR=`eval echo $BINDIR` + +INCLUDEDIR=`eval echo $includedir` +INCLUDEDIR=`eval echo $INCLUDEDIR` + +LIBDIR=`eval echo $libdir` +LIBDIR=`eval echo $LIBDIR` + +DATADIR=`eval echo $datadir` +DATADIR=`eval echo $DATADIR` + +MANDIR=`eval echo $mandir` +MANDIR=`eval echo $MANDIR` + + +# Extract the first word of "xsltproc", so it can be a program name with args. +set dummy xsltproc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_XSLTPROC+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $XSLTPROC in + [\\/]* | ?:[\\/]*) + ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path. 
+ ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false" + ;; +esac +fi +XSLTPROC=$ac_cv_path_XSLTPROC +if test -n "$XSLTPROC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5 +$as_echo "$XSLTPROC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then + DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" +elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then + DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" +else + DEFAULT_XSLROOT="" +fi + +# Check whether --with-xslroot was given. +if test "${with_xslroot+set}" = set; then : + withval=$with_xslroot; +if test "x$with_xslroot" = "xno" ; then + XSLROOT="${DEFAULT_XSLROOT}" +else + XSLROOT="${with_xslroot}" +fi + +else + XSLROOT="${DEFAULT_XSLROOT}" + +fi + +if test "x$XSLTPROC" = "xfalse" ; then + XSLROOT="" +fi + + +CFLAGS=$CFLAGS +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. 
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. 
+ break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdio.h> +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +if test "x$GCC" != "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 +$as_echo_n "checking whether compiler is MSVC... " >&6; } +if ${je_cv_msvc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef _MSC_VER + int fail-1; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_msvc=yes +else + je_cv_msvc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 +$as_echo "$je_cv_msvc" >&6; } +fi + +je_cv_cray_prgenv_wrapper="" +if test "x${PE_ENV}" != "x" ; then + case "${CC}" in + CC|cc) + je_cv_cray_prgenv_wrapper="yes" + ;; + *) + ;; + esac +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5 +$as_echo_n "checking whether compiler is cray... " >&6; } +if ${je_cv_cray+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef _CRAYC + int fail-1; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cray=yes +else + je_cv_cray=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5 +$as_echo "$je_cv_cray" >&6; } + +if test "x${je_cv_cray}" = "xyes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5 +$as_echo_n "checking whether cray compiler version is 8.4... " >&6; } +if ${je_cv_cray_84+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + +#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) + int fail-1; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cray_84=yes +else + je_cv_cray_84=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5 +$as_echo "$je_cv_cray_84" >&6; } +fi + +if test "x$GCC" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5 +$as_echo_n "checking whether compiler supports -std=gnu11... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-std=gnu11 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-std=gnu11 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_HAS_RESTRICT 1 +_ACEOF + + else + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 +$as_echo_n "checking whether compiler supports -std=gnu99... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-std=gnu99 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-std=gnu99 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_HAS_RESTRICT 1 +_ACEOF + + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 +$as_echo_n "checking whether compiler supports -Wall... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wall + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wall + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5 +$as_echo_n "checking whether compiler supports -Wextra... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wextra + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wextra + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5 +$as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wshorten-64-to-32 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wshorten-64-to-32 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5 +$as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wsign-compare + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wsign-compare + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wundef" >&5 +$as_echo_n "checking whether compiler supports -Wundef... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wundef + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wundef + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-format-zero-length" >&5 +$as_echo_n "checking whether compiler supports -Wno-format-zero-length... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Wno-format-zero-length + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Wno-format-zero-length + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 +$as_echo_n "checking whether compiler supports -pipe... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-pipe + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-pipe + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 +$as_echo_n "checking whether compiler supports -g3... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-g3 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-g3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +elif test "x$je_cv_msvc" = "xyes" ; then + CC="$CC -nologo" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5 +$as_echo_n "checking whether compiler supports -Zi... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Zi + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Zi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5 +$as_echo_n "checking whether compiler supports -MT... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-MT + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-MT + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5 +$as_echo_n "checking whether compiler supports -W3... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-W3 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-W3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5 +$as_echo_n "checking whether compiler supports -FS... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-FS + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-FS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + T_APPEND_V=-I${srcdir}/include/msvc_compat + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + +fi +if test "x$je_cv_cray" = "xyes" ; then + if test "x$je_cv_cray_84" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5 +$as_echo_n "checking whether compiler supports -hipa2... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-hipa2 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-hipa2 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5 +$as_echo_n "checking whether compiler supports -hnognu... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-hnognu + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-hnognu + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5 +$as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-hnomessage=128 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-hnomessage=128 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5 +$as_echo_n "checking whether compiler supports -hnomessage=1357... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-hnomessage=1357 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-hnomessage=1357 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +fi + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. 
+ # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Check whether --enable-cxx was given. +if test "${enable_cxx+set}" = set; then : + enableval=$enable_cxx; if test "x$enable_cxx" = "xno" ; then + enable_cxx="0" +else + enable_cxx="1" +fi + +else + enable_cxx="1" + +fi + +if test "x$enable_cxx" = "x1" ; then + # =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) +# +# DESCRIPTION +# +# Check for baseline language coverage in the compiler for the specified +# version of the C++ standard. If necessary, add switches to CXX and +# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) +# or '14' (for the C++14 standard). +# +# The second argument, if specified, indicates whether you insist on an +# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. +# -std=c++11). If neither is specified, you get whatever works, with +# preference for an extended mode. +# +# The third argument, if specified 'mandatory' or if left unspecified, +# indicates that baseline support for the specified C++ standard is +# required and that the macro should error out if no mode with that +# support is found. If specified 'optional', then configuration proceeds +# regardless, after defining HAVE_CXX${VERSION} if and only if a +# supporting mode is found. +# +# LICENSE +# +# Copyright (c) 2008 Benjamin Kosnik +# Copyright (c) 2012 Zack Weinberg +# Copyright (c) 2013 Roy Stogner +# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov +# Copyright (c) 2015 Paul Norman +# Copyright (c) 2015 Moritz Klammler +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
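+#
+# Illustrative invocation (a minimal sketch; the exact arguments shown here
+# are an assumption, and only the expanded checks that follow are
+# authoritative): from a project's configure.ac the macro is called with the
+# standard version, extension mode and requirement level, for example
+#
+#   AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
+#
+# With [optional] the configure run continues even when no supporting mode is
+# found, and HAVE_CXX14 is defined only when one is, which is the behavior
+# implemented by the shell code expanded further below.
+#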
+ +#serial 4 + + + + + + + + + + + + + + + + + + + + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. 
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + ax_cxx_compile_cxx14_required=false + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + ac_success=no + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features by default" >&5 +$as_echo_n "checking whether $CXX supports C++14 features by default... " >&6; } +if ${ax_cv_cxx_compile_cxx14+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +// If the compiler admits that it is not ready for C++11, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201103L + +#error "This is not a C++11 compiler" + +#else + +namespace cxx11 +{ + + namespace test_static_assert + { + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + } + + namespace test_final_override + { + + struct Base + { + virtual void f() {} + }; + + struct Derived : public Base + { + virtual void f() override {} + }; + + } + + namespace test_double_right_angle_brackets + { + + template < typename T > + struct check {}; + + typedef check single_type; + typedef check> double_type; + typedef check>> triple_type; + typedef check>>> quadruple_type; + + } + + namespace test_decltype + { + + int + f() + { + int a = 1; + decltype(a) b = 2; + return a + b; + } + + } + + namespace test_type_deduction + { + + template < typename T1, typename T2 > + struct is_same + { + static const bool value = false; + }; + + template < typename T > + struct is_same + { + static const bool value = true; + }; + + template < typename T1, typename T2 > + auto + add(T1 a1, T2 a2) -> decltype(a1 + a2) + { + return a1 + a2; + } + + int + test(const int c, volatile int v) + { + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == false, ""); + auto ac = c; + auto av = v; + auto sumi = ac + av + 'x'; + auto sumf = ac + av + 1.0; + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == true, ""); + return (sumf > 0.0) ? 
sumi : add(c, v); + } + + } + + namespace test_noexcept + { + + int f() { return 0; } + int g() noexcept { return 0; } + + static_assert(noexcept(f()) == false, ""); + static_assert(noexcept(g()) == true, ""); + + } + + namespace test_constexpr + { + + template < typename CharT > + unsigned long constexpr + strlen_c_r(const CharT *const s, const unsigned long acc) noexcept + { + return *s ? strlen_c_r(s + 1, acc + 1) : acc; + } + + template < typename CharT > + unsigned long constexpr + strlen_c(const CharT *const s) noexcept + { + return strlen_c_r(s, 0UL); + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("1") == 1UL, ""); + static_assert(strlen_c("example") == 7UL, ""); + static_assert(strlen_c("another\0example") == 7UL, ""); + + } + + namespace test_rvalue_references + { + + template < int N > + struct answer + { + static constexpr int value = N; + }; + + answer<1> f(int&) { return answer<1>(); } + answer<2> f(const int&) { return answer<2>(); } + answer<3> f(int&&) { return answer<3>(); } + + void + test() + { + int i = 0; + const int c = 0; + static_assert(decltype(f(i))::value == 1, ""); + static_assert(decltype(f(c))::value == 2, ""); + static_assert(decltype(f(0))::value == 3, ""); + } + + } + + namespace test_uniform_initialization + { + + struct test + { + static const int zero {}; + static const int one {1}; + }; + + static_assert(test::zero == 0, ""); + static_assert(test::one == 1, ""); + + } + + namespace test_lambdas + { + + void + test1() + { + auto lambda1 = [](){}; + auto lambda2 = lambda1; + lambda1(); + lambda2(); + } + + int + test2() + { + auto a = [](int i, int j){ return i + j; }(1, 2); + auto b = []() -> int { return '0'; }(); + auto c = [=](){ return a + b; }(); + auto d = [&](){ return c; }(); + auto e = [a, &b](int x) mutable { + const auto identity = [](int y){ return y; }; + for (auto i = 0; i < a; ++i) + a += b--; + return x + identity(a + b); + }(0); + return a + b + c + d + e; + } + + int + test3() + { + const auto nullary = [](){ return 0; }; + const auto unary = [](int x){ return x; }; + using nullary_t = decltype(nullary); + using unary_t = decltype(unary); + const auto higher1st = [](nullary_t f){ return f(); }; + const auto higher2nd = [unary](nullary_t f1){ + return [unary, f1](unary_t f2){ return f2(unary(f1())); }; + }; + return higher1st(nullary) + higher2nd(nullary)(unary); + } + + } + + namespace test_variadic_templates + { + + template + struct sum; + + template + struct sum + { + static constexpr auto value = N0 + sum::value; + }; + + template <> + struct sum<> + { + static constexpr auto value = 0; + }; + + static_assert(sum<>::value == 0, ""); + static_assert(sum<1>::value == 1, ""); + static_assert(sum<23>::value == 23, ""); + static_assert(sum<1, 2>::value == 3, ""); + static_assert(sum<5, 5, 11>::value == 21, ""); + static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); + + } + + // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae + // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function + // because of this. + namespace test_template_alias_sfinae + { + + struct foo {}; + + template + using member = typename T::member_type; + + template + void func(...) {} + + template + void func(member*) {} + + void test(); + + void test() { func(0); } + + } + +} // namespace cxx11 + +#endif // __cplusplus >= 201103L + + + + +// If the compiler admits that it is not ready for C++14, why torture it? +// Hopefully, this will speed up the test. 
+ +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201402L + +#error "This is not a C++14 compiler" + +#else + +namespace cxx14 +{ + + namespace test_polymorphic_lambdas + { + + int + test() + { + const auto lambda = [](auto&&... args){ + const auto istiny = [](auto x){ + return (sizeof(x) == 1UL) ? 1 : 0; + }; + const int aretiny[] = { istiny(args)... }; + return aretiny[0]; + }; + return lambda(1, 1L, 1.0f, '1'); + } + + } + + namespace test_binary_literals + { + + constexpr auto ivii = 0b0000000000101010; + static_assert(ivii == 42, "wrong value"); + + } + + namespace test_generalized_constexpr + { + + template < typename CharT > + constexpr unsigned long + strlen_c(const CharT *const s) noexcept + { + auto length = 0UL; + for (auto p = s; *p; ++p) + ++length; + return length; + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("x") == 1UL, ""); + static_assert(strlen_c("test") == 4UL, ""); + static_assert(strlen_c("another\0test") == 7UL, ""); + + } + + namespace test_lambda_init_capture + { + + int + test() + { + auto x = 0; + const auto lambda1 = [a = x](int b){ return a + b; }; + const auto lambda2 = [a = lambda1(x)](){ return a; }; + return lambda2(); + } + + } + + namespace test_digit_seperators + { + + constexpr auto ten_million = 100'000'000; + static_assert(ten_million == 100000000, ""); + + } + + namespace test_return_type_deduction + { + + auto f(int& x) { return x; } + decltype(auto) g(int& x) { return x; } + + template < typename T1, typename T2 > + struct is_same + { + static constexpr auto value = false; + }; + + template < typename T > + struct is_same + { + static constexpr auto value = true; + }; + + int + test() + { + auto x = 0; + static_assert(is_same::value, ""); + static_assert(is_same::value, ""); + return x; + } + + } + +} // namespace cxx14 + +#endif // __cplusplus >= 201402L + + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_cxx_compile_cxx14=yes +else + ax_cv_cxx_compile_cxx14=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_cxx_compile_cxx14" >&5 +$as_echo "$ax_cv_cxx_compile_cxx14" >&6; } + if test x$ax_cv_cxx_compile_cxx14 = xyes; then + ac_success=yes + fi + + + + if test x$ac_success = xno; then + for switch in -std=c++14 -std=c++0x +std=c++14 "-h std=c++14"; do + cachevar=`$as_echo "ax_cv_cxx_compile_cxx14_$switch" | $as_tr_sh` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features with $switch" >&5 +$as_echo_n "checking whether $CXX supports C++14 features with $switch... " >&6; } +if eval \${$cachevar+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_CXX="$CXX" + CXX="$CXX $switch" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +// If the compiler admits that it is not ready for C++11, why torture it? +// Hopefully, this will speed up the test. 
+ +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201103L + +#error "This is not a C++11 compiler" + +#else + +namespace cxx11 +{ + + namespace test_static_assert + { + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + } + + namespace test_final_override + { + + struct Base + { + virtual void f() {} + }; + + struct Derived : public Base + { + virtual void f() override {} + }; + + } + + namespace test_double_right_angle_brackets + { + + template < typename T > + struct check {}; + + typedef check single_type; + typedef check> double_type; + typedef check>> triple_type; + typedef check>>> quadruple_type; + + } + + namespace test_decltype + { + + int + f() + { + int a = 1; + decltype(a) b = 2; + return a + b; + } + + } + + namespace test_type_deduction + { + + template < typename T1, typename T2 > + struct is_same + { + static const bool value = false; + }; + + template < typename T > + struct is_same + { + static const bool value = true; + }; + + template < typename T1, typename T2 > + auto + add(T1 a1, T2 a2) -> decltype(a1 + a2) + { + return a1 + a2; + } + + int + test(const int c, volatile int v) + { + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == false, ""); + auto ac = c; + auto av = v; + auto sumi = ac + av + 'x'; + auto sumf = ac + av + 1.0; + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == true, ""); + return (sumf > 0.0) ? sumi : add(c, v); + } + + } + + namespace test_noexcept + { + + int f() { return 0; } + int g() noexcept { return 0; } + + static_assert(noexcept(f()) == false, ""); + static_assert(noexcept(g()) == true, ""); + + } + + namespace test_constexpr + { + + template < typename CharT > + unsigned long constexpr + strlen_c_r(const CharT *const s, const unsigned long acc) noexcept + { + return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; + } + + template < typename CharT > + unsigned long constexpr + strlen_c(const CharT *const s) noexcept + { + return strlen_c_r(s, 0UL); + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("1") == 1UL, ""); + static_assert(strlen_c("example") == 7UL, ""); + static_assert(strlen_c("another\0example") == 7UL, ""); + + } + + namespace test_rvalue_references + { + + template < int N > + struct answer + { + static constexpr int value = N; + }; + + answer<1> f(int&) { return answer<1>(); } + answer<2> f(const int&) { return answer<2>(); } + answer<3> f(int&&) { return answer<3>(); } + + void + test() + { + int i = 0; + const int c = 0; + static_assert(decltype(f(i))::value == 1, ""); + static_assert(decltype(f(c))::value == 2, ""); + static_assert(decltype(f(0))::value == 3, ""); + } + + } + + namespace test_uniform_initialization + { + + struct test + { + static const int zero {}; + static const int one {1}; + }; + + static_assert(test::zero == 0, ""); + static_assert(test::one == 1, ""); + + } + + namespace test_lambdas + { + + void + test1() + { + auto lambda1 = [](){}; + auto lambda2 = lambda1; + lambda1(); + lambda2(); + } + + int + test2() + { + auto a = [](int i, int j){ return i + j; }(1, 2); + auto b = []() -> int { return '0'; }(); + auto c = [=](){ return a + b; }(); + auto d = [&](){ return c; }(); + auto e = [a, &b](int x) mutable { + const auto identity = [](int y){ return y; }; + for (auto i = 0; i < a; ++i) + a += b--; + return x + identity(a + b); + }(0); + return a + b + c + d + e; + } + + int + test3() + { + const auto nullary = [](){ return 0; }; + const auto unary = [](int x){ return x; }; + using nullary_t = decltype(nullary); + using unary_t = decltype(unary); + const auto higher1st = [](nullary_t f){ return f(); }; + const auto higher2nd = [unary](nullary_t f1){ + return [unary, f1](unary_t f2){ return f2(unary(f1())); }; + }; + return higher1st(nullary) + higher2nd(nullary)(unary); + } + + } + + namespace test_variadic_templates + { + + template + struct sum; + + template + struct sum + { + static constexpr auto value = N0 + sum::value; + }; + + template <> + struct sum<> + { + static constexpr auto value = 0; + }; + + static_assert(sum<>::value == 0, ""); + static_assert(sum<1>::value == 1, ""); + static_assert(sum<23>::value == 23, ""); + static_assert(sum<1, 2>::value == 3, ""); + static_assert(sum<5, 5, 11>::value == 21, ""); + static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); + + } + + // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae + // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function + // because of this. + namespace test_template_alias_sfinae + { + + struct foo {}; + + template + using member = typename T::member_type; + + template + void func(...) {} + + template + void func(member*) {} + + void test(); + + void test() { func(0); } + + } + +} // namespace cxx11 + +#endif // __cplusplus >= 201103L + + + + +// If the compiler admits that it is not ready for C++14, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201402L + +#error "This is not a C++14 compiler" + +#else + +namespace cxx14 +{ + + namespace test_polymorphic_lambdas + { + + int + test() + { + const auto lambda = [](auto&&... args){ + const auto istiny = [](auto x){ + return (sizeof(x) == 1UL) ? 1 : 0; + }; + const int aretiny[] = { istiny(args)... 
}; + return aretiny[0]; + }; + return lambda(1, 1L, 1.0f, '1'); + } + + } + + namespace test_binary_literals + { + + constexpr auto ivii = 0b0000000000101010; + static_assert(ivii == 42, "wrong value"); + + } + + namespace test_generalized_constexpr + { + + template < typename CharT > + constexpr unsigned long + strlen_c(const CharT *const s) noexcept + { + auto length = 0UL; + for (auto p = s; *p; ++p) + ++length; + return length; + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("x") == 1UL, ""); + static_assert(strlen_c("test") == 4UL, ""); + static_assert(strlen_c("another\0test") == 7UL, ""); + + } + + namespace test_lambda_init_capture + { + + int + test() + { + auto x = 0; + const auto lambda1 = [a = x](int b){ return a + b; }; + const auto lambda2 = [a = lambda1(x)](){ return a; }; + return lambda2(); + } + + } + + namespace test_digit_seperators + { + + constexpr auto ten_million = 100'000'000; + static_assert(ten_million == 100000000, ""); + + } + + namespace test_return_type_deduction + { + + auto f(int& x) { return x; } + decltype(auto) g(int& x) { return x; } + + template < typename T1, typename T2 > + struct is_same + { + static constexpr auto value = false; + }; + + template < typename T > + struct is_same + { + static constexpr auto value = true; + }; + + int + test() + { + auto x = 0; + static_assert(is_same::value, ""); + static_assert(is_same::value, ""); + return x; + } + + } + +} // namespace cxx14 + +#endif // __cplusplus >= 201402L + + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval $cachevar=yes +else + eval $cachevar=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CXX="$ac_save_CXX" +fi +eval ac_res=\$$cachevar + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + if test x$ax_cxx_compile_cxx14_required = xtrue; then + if test x$ac_success = xno; then + as_fn_error $? "*** A compiler with support for C++14 language features is required." "$LINENO" 5 + fi + fi + if test x$ac_success = xno; then + HAVE_CXX14=0 + { $as_echo "$as_me:${as_lineno-$LINENO}: No compiler with C++14 support was found" >&5 +$as_echo "$as_me: No compiler with C++14 support was found" >&6;} + else + HAVE_CXX14=1 + +$as_echo "#define HAVE_CXX14 1" >>confdefs.h + + fi + + + if test "x${HAVE_CXX14}" = "x1" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 +$as_echo_n "checking whether compiler supports -Wall... 
" >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-Wall + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-Wall + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5 +$as_echo_n "checking whether compiler supports -Wextra... " >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-Wextra + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-Wextra + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5 +$as_echo_n "checking whether compiler supports -g3... " >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-g3 + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-g3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + + SAVED_LIBS="${LIBS}" + T_APPEND_V=-lstdc++ + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether libstdc++ linkage is compilable" >&5 +$as_echo_n "checking whether libstdc++ linkage is compilable... " >&6; } +if ${je_cv_libstdcxx+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + int *arr = (int *)malloc(sizeof(int) * 42); + if (arr == NULL) + return 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_libstdcxx=yes +else + je_cv_libstdcxx=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_libstdcxx" >&5 +$as_echo "$je_cv_libstdcxx" >&6; } + + if test "x${je_cv_libstdcxx}" = "xno" ; then + LIBS="${SAVED_LIBS}" + fi + else + enable_cxx="0" + fi +fi + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. 
+ # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include <float.h> + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <string.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdlib.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <ctype.h> +#include <stdlib.h> +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... " >&6; } +if ${ac_cv_c_bigendian+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + # Check for potential -arch flags.  It is not universal unless + # there are at least two -arch flags with different values. + ac_arch= + ac_prev= + for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do + if test -n "$ac_prev"; then + case $ac_word in + i?86 | x86_64 | ppc | ppc64) + if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then + ac_arch=$ac_word + else + ac_cv_c_bigendian=universal + break + fi + ;; + esac + ac_prev= + elif test "x$ac_word" = "x-arch"; then + ac_prev=arch + fi + done +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test $ac_cv_c_bigendian = unknown; then + # See if sys/param.h defines the BYTE_ORDER macro. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <sys/types.h> + #include <sys/param.h> + +int +main () +{ +#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ + && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ + && LITTLE_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <sys/types.h> + #include <sys/param.h> + +int +main () +{ +#if BYTE_ORDER != BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <limits.h> + +int +main () +{ +#if ! 
(defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) + bogus endian macros + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to _BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +#ifndef _BIG_ENDIAN + not big endian + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes +else + ac_cv_c_bigendian=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # Compile a test program. + if test "$cross_compiling" = yes; then : + # Try to guess by grepping values from an object file. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +short int ascii_mm[] = + { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; + short int ascii_ii[] = + { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; + int use_ascii (int i) { + return ascii_mm[i] + ascii_ii[i]; + } + short int ebcdic_ii[] = + { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; + short int ebcdic_mm[] = + { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; + int use_ebcdic (int i) { + return ebcdic_mm[i] + ebcdic_ii[i]; + } + extern int foo; + +int +main () +{ +return use_ascii (foo) == use_ebcdic (foo); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then + ac_cv_c_bigendian=yes + fi + if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then + if test "$ac_cv_c_bigendian" = unknown; then + ac_cv_c_bigendian=no + else + # finding both strings is unlikely to happen, but who knows? + ac_cv_c_bigendian=unknown + fi + fi +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + ac_cv_big_endian=1;; #( + no) + ac_cv_big_endian=0 ;; #( + universal) + +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h + + ;; #( + *) + as_fn_error $? 
"unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac + +if test "x${ac_cv_big_endian}" = "x1" ; then + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_BIG_ENDIAN +_ACEOF + +fi + +if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then + T_APPEND_V=-I${srcdir}/include/msvc_compat/C99 + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + +fi + +if test "x${je_cv_msvc}" = "xyes" ; then + LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5 +$as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; } +else + # The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 +$as_echo_n "checking size of void *... " >&6; } +if ${ac_cv_sizeof_void_p+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_void_p" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (void *) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_void_p=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 +$as_echo "$ac_cv_sizeof_void_p" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_VOID_P $ac_cv_sizeof_void_p +_ACEOF + + + if test "x${ac_cv_sizeof_void_p}" = "x8" ; then + LG_SIZEOF_PTR=3 + elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then + LG_SIZEOF_PTR=2 + else + as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5 + fi +fi +cat >>confdefs.h <<_ACEOF +#define LG_SIZEOF_PTR $LG_SIZEOF_PTR +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 +$as_echo_n "checking size of int... " >&6; } +if ${ac_cv_sizeof_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_int" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (int) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_int=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 +$as_echo "$ac_cv_sizeof_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_INT $ac_cv_sizeof_int +_ACEOF + + +if test "x${ac_cv_sizeof_int}" = "x8" ; then + LG_SIZEOF_INT=3 +elif test "x${ac_cv_sizeof_int}" = "x4" ; then + LG_SIZEOF_INT=2 +else + as_fn_error $? 
"Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5 +fi +cat >>confdefs.h <<_ACEOF +#define LG_SIZEOF_INT $LG_SIZEOF_INT +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 +$as_echo_n "checking size of long... " >&6; } +if ${ac_cv_sizeof_long+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_long" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (long) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_long=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 +$as_echo "$ac_cv_sizeof_long" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG $ac_cv_sizeof_long +_ACEOF + + +if test "x${ac_cv_sizeof_long}" = "x8" ; then + LG_SIZEOF_LONG=3 +elif test "x${ac_cv_sizeof_long}" = "x4" ; then + LG_SIZEOF_LONG=2 +else + as_fn_error $? "Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5 +fi +cat >>confdefs.h <<_ACEOF +#define LG_SIZEOF_LONG $LG_SIZEOF_LONG +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5 +$as_echo_n "checking size of long long... " >&6; } +if ${ac_cv_sizeof_long_long+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_long_long" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (long long) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_long_long=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5 +$as_echo "$ac_cv_sizeof_long_long" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long +_ACEOF + + +if test "x${ac_cv_sizeof_long_long}" = "x8" ; then + LG_SIZEOF_LONG_LONG=3 +elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then + LG_SIZEOF_LONG_LONG=2 +else + as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5 +fi +cat >>confdefs.h <<_ACEOF +#define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG +_ACEOF + + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5 +$as_echo_n "checking size of intmax_t... 
" >&6; } +if ${ac_cv_sizeof_intmax_t+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_intmax_t" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (intmax_t) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_intmax_t=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5 +$as_echo "$ac_cv_sizeof_intmax_t" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t +_ACEOF + + +if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then + LG_SIZEOF_INTMAX_T=4 +elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then + LG_SIZEOF_INTMAX_T=3 +elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then + LG_SIZEOF_INTMAX_T=2 +else + as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5 +fi +cat >>confdefs.h <<_ACEOF +#define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T +_ACEOF + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +CPU_SPINWAIT="" +case "${host_cpu}" in + i686|x86_64) + HAVE_CPU_SPINWAIT=1 + if test "x${je_cv_msvc}" = "xyes" ; then + if ${je_cv_pause_msvc+:} false; then : + $as_echo_n "(cached) " >&6 +else + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5 +$as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; } +if ${je_cv_pause_msvc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +_mm_pause(); return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pause_msvc=yes +else + je_cv_pause_msvc=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5 +$as_echo "$je_cv_pause_msvc" >&6; } + +fi + + if test "x${je_cv_pause_msvc}" = "xyes" ; then + CPU_SPINWAIT='_mm_pause()' + fi + else + if ${je_cv_pause+:} false; then : + $as_echo_n "(cached) " >&6 +else + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5 +$as_echo_n "checking whether pause instruction is compilable... " >&6; } +if ${je_cv_pause+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +__asm__ volatile("pause"); return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pause=yes +else + je_cv_pause=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5 +$as_echo "$je_cv_pause" >&6; } + +fi + + if test "x${je_cv_pause}" = "xyes" ; then + CPU_SPINWAIT='__asm__ volatile("pause")' + fi + fi + ;; + *) + HAVE_CPU_SPINWAIT=0 + ;; +esac +cat >>confdefs.h <<_ACEOF +#define HAVE_CPU_SPINWAIT $HAVE_CPU_SPINWAIT +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define CPU_SPINWAIT $CPU_SPINWAIT +_ACEOF + + + +# Check whether --with-lg_vaddr was given. +if test "${with_lg_vaddr+set}" = set; then : + withval=$with_lg_vaddr; LG_VADDR="$with_lg_vaddr" +else + LG_VADDR="detect" +fi + + +case "${host_cpu}" in + aarch64) + if test "x$LG_VADDR" = "xdetect"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 +$as_echo_n "checking number of significant virtual address bits... " >&6; } + if test "x${LG_SIZEOF_PTR}" = "x2" ; then + #aarch64 ILP32 + LG_VADDR=32 + else + #aarch64 LP64 + LG_VADDR=48 + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5 +$as_echo "$LG_VADDR" >&6; } + fi + ;; + x86_64) + if test "x$LG_VADDR" = "xdetect"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 +$as_echo_n "checking number of significant virtual address bits... " >&6; } +if ${je_cv_lg_vaddr+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + je_cv_lg_vaddr=57 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include <stdio.h> +#ifdef _WIN32 +#include <limits.h> +#include <intrin.h> +typedef unsigned __int32 uint32_t; +#else +#include <stdint.h> +#endif + +int +main () +{ + + uint32_t r[4]; + uint32_t eax_in = 0x80000008U; +#ifdef _WIN32 + __cpuid((int *)r, (int)eax_in); +#else + asm volatile ("cpuid" + : "=a" (r[0]), "=b" (r[1]), "=c" (r[2]), "=d" (r[3]) + : "a" (eax_in), "c" (0) + ); +#endif + uint32_t eax_out = r[0]; + uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); + FILE *f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + if (vaddr > (sizeof(void *) << 3)) { + vaddr = sizeof(void *) << 3; + } + fprintf(f, "%u", vaddr); + fclose(f); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + je_cv_lg_vaddr=`cat conftest.out` +else + je_cv_lg_vaddr=error +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_vaddr" >&5 +$as_echo "$je_cv_lg_vaddr" >&6; } + if test "x${je_cv_lg_vaddr}" != "x" ; then + LG_VADDR="${je_cv_lg_vaddr}" + fi + if test "x${LG_VADDR}" != "xerror" ; then + cat >>confdefs.h <<_ACEOF +#define LG_VADDR $LG_VADDR +_ACEOF + + else + as_fn_error $? "cannot determine number of significant virtual address bits" "$LINENO" 5 + fi + fi + ;; + *) + if test "x$LG_VADDR" = "xdetect"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5 +$as_echo_n "checking number of significant virtual address bits... " >&6; } + if test "x${LG_SIZEOF_PTR}" = "x3" ; then + LG_VADDR=64 + elif test "x${LG_SIZEOF_PTR}" = "x2" ; then + LG_VADDR=32 + elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then + LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" + else + as_fn_error $? "Unsupported lg(pointer size): ${LG_SIZEOF_PTR}" "$LINENO" 5 + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5 +$as_echo "$LG_VADDR" >&6; } + fi + ;; +esac +cat >>confdefs.h <<_ACEOF +#define LG_VADDR $LG_VADDR +_ACEOF + + +LD_PRELOAD_VAR="LD_PRELOAD" +so="so" +importlib="${so}" +o="$ac_objext" +a="a" +exe="$ac_exeext" +libprefix="lib" +link_whole_archive="0" +DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' +RPATH='-Wl,-rpath,$(1)' +SOREV="${so}.${rev}" +PIC_CFLAGS='-fPIC -DPIC' +CTARGET='-o $@' +LDTARGET='-o $@' +TEST_LD_MODE= +EXTRA_LDFLAGS= +ARFLAGS='crus' +AROUT=' $@' +CC_MM=1 + +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + TEST_LD_MODE='-dynamic' +fi + +if test "x${je_cv_cray}" = "xyes" ; then + CC_MM= +fi + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nm", so it can be a program name with args. +set dummy ${ac_tool_prefix}nm; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NM"; then + ac_cv_prog_NM="$NM" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NM="${ac_tool_prefix}nm" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NM=$ac_cv_prog_NM +if test -n "$NM"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NM" >&5 +$as_echo "$NM" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NM"; then + ac_ct_NM=$NM + # Extract the first word of "nm", so it can be a program name with args. +set dummy nm; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NM"; then + ac_cv_prog_ac_ct_NM="$ac_ct_NM" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NM="nm" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NM=$ac_cv_prog_ac_ct_NM +if test -n "$ac_ct_NM"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NM" >&5 +$as_echo "$ac_ct_NM" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NM" = x; then + NM=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NM=$ac_ct_NM + fi +else + NM="$ac_cv_prog_NM" +fi + + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + + + + +# Check whether --with-version was given. +if test "${with_version+set}" = set; then : + withval=$with_version; + echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null + if test $? -eq 0 ; then + echo "$with_version" > "${objroot}VERSION" + else + echo "${with_version}" | grep '^VERSION$' 2>&1 1>/dev/null + if test $? -ne 0 ; then + as_fn_error $? "${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION" "$LINENO" 5 + fi + fi + +else + + if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + done + fi + rm -f "${objroot}VERSION.tmp" + +fi + + +if test ! -e "${objroot}VERSION" ; then + if test ! 
-e "${srcroot}VERSION" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5 +$as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; } + echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" + else + cp ${srcroot}VERSION ${objroot}VERSION + fi +fi +jemalloc_version=`cat "${objroot}VERSION"` +jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'` +jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'` +jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'` +jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'` +jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'` + + + + + + + +default_retain="0" +maps_coalesce="1" +DUMP_SYMS="${NM} -a" +SYM_PREFIX="" +case "${host}" in + *-*-darwin* | *-*-ios*) + abi="macho" + RPATH="" + LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" + so="dylib" + importlib="${so}" + force_tls="0" + DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' + SOREV="${rev}.${so}" + sbrk_deprecated="1" + SYM_PREFIX="_" + ;; + *-*-freebsd*) + abi="elf" + $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h + + force_lazy_lock="1" + ;; + *-*-dragonfly*) + abi="elf" + ;; + *-*-openbsd*) + abi="elf" + force_tls="0" + ;; + *-*-bitrig*) + abi="elf" + ;; + *-*-linux-android) + T_APPEND_V=-D_GNU_SOURCE + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + + abi="elf" + $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h + + $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h + + $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h + + $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h + + $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h + + force_tls="0" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *-*-linux*) + T_APPEND_V=-D_GNU_SOURCE + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + + abi="elf" + $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS " >>confdefs.h + + $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h + + $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h + + $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h + + $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h + + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *-*-kfreebsd*) + T_APPEND_V=-D_GNU_SOURCE + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + + abi="elf" + $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h + + $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h + + $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h + + $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h + + ;; + *-*-netbsd*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5 +$as_echo_n "checking ABI... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifdef __ELF__ +/* ELF */ +#else +#error aout +#endif + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + abi="elf" +else + abi="aout" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 +$as_echo "$abi" >&6; } + ;; + *-*-solaris2*) + abi="elf" + RPATH='-Wl,-R,$(1)' + T_APPEND_V=-D_POSIX_PTHREAD_SEMANTICS + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + + T_APPEND_V=-lposix4 -lsocket -lnsl + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + + ;; + *-ibm-aix*) + if test "${LG_SIZEOF_PTR}" = "3"; then + LD_PRELOAD_VAR="LDR_PRELOAD64" + else + LD_PRELOAD_VAR="LDR_PRELOAD" + fi + abi="xcoff" + ;; + *-*-mingw* | *-*-cygwin*) + abi="pecoff" + force_tls="0" + maps_coalesce="0" + RPATH="" + so="dll" + if test "x$je_cv_msvc" = "xyes" ; then + importlib="lib" + DSO_LDFLAGS="-LD" + EXTRA_LDFLAGS="-link -DEBUG" + CTARGET='-Fo$@' + LDTARGET='-Fe$@' + AR='lib' + ARFLAGS='-nologo -out:' + AROUT='$@' + CC_MM= + else + importlib="${so}" + DSO_LDFLAGS="-shared" + link_whole_archive="1" + fi + case "${host}" in + *-*-cygwin*) + DUMP_SYMS="dumpbin /SYMBOLS" + ;; + *) + ;; + esac + a="lib" + libprefix="" + SOREV="${so}" + PIC_CFLAGS="" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 +$as_echo "Unsupported operating system: ${host}" >&6; } + abi="elf" + ;; +esac + +JEMALLOC_USABLE_SIZE_CONST=const +for ac_header in malloc.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default" +if test "x$ac_cv_header_malloc_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MALLOC_H 1 +_ACEOF + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5 +$as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <malloc.h> + #include <stddef.h> + size_t malloc_usable_size(const void *ptr); + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + JEMALLOC_USABLE_SIZE_CONST= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi + +done + +cat >>confdefs.h <<_ACEOF +#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST +_ACEOF + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 +$as_echo_n "checking for library containing log... " >&6; } +if ${ac_cv_search_log+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char log (); +int +main () +{ +return log (); + ; + return 0; +} +_ACEOF +for ac_lib in '' m; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_log=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_log+:} false; then : + break +fi +done +if ${ac_cv_search_log+:} false; then : + +else + ac_cv_search_log=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 +$as_echo "$ac_cv_search_log" >&6; } +ac_res=$ac_cv_search_log +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +else + as_fn_error $? "Missing math functions" "$LINENO" 5 +fi + +if test "x$ac_cv_search_log" != "xnone required" ; then + LM="$ac_cv_search_log" +else + LM= +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 +$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; } +if ${je_cv_attribute+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +static __attribute__((unused)) void foo(void){} +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_attribute=yes +else + je_cv_attribute=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5 +$as_echo "$je_cv_attribute" >&6; } + +if test "x${je_cv_attribute}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_ATTR " >>confdefs.h + + if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 +$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-fvisibility=hidden + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-fvisibility=hidden + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5 +$as_echo_n "checking whether compiler supports -fvisibility=hidden... 
" >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-fvisibility=hidden + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-fvisibility=hidden + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + fi +fi +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 +$as_echo_n "checking whether tls_model attribute is compilable... " >&6; } +if ${je_cv_tls_model+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +static __thread int + __attribute__((tls_model("initial-exec"), unused)) foo; + foo = 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_tls_model=yes +else + je_cv_tls_model=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5 +$as_echo "$je_cv_tls_model" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5 +$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; } +if ${je_cv_alloc_size+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +void *foo(size_t size) __attribute__((alloc_size(1))); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_alloc_size=yes +else + je_cv_alloc_size=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5 +$as_echo "$je_cv_alloc_size" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +if test "x${je_cv_alloc_size}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h + +fi +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5 +$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; } +if ${je_cv_format_gnu_printf+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +void *foo(const char *format, ...) 
__attribute__((format(gnu_printf, 1, 2))); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_format_gnu_printf=yes +else + je_cv_format_gnu_printf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5 +$as_echo "$je_cv_format_gnu_printf" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +if test "x${je_cv_format_gnu_printf}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h + +fi +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) 
attribute is compilable" >&5 +$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } +if ${je_cv_format_printf+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +void *foo(const char *format, ...) __attribute__((format(printf, 1, 2))); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_format_printf=yes +else + je_cv_format_printf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5 +$as_echo "$je_cv_format_printf" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +if test "x${je_cv_format_printf}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h + +fi + +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5 +$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } +if ${je_cv_format_arg+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +const char * __attribute__((__format_arg__(1))) foo(const char *format); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_format_arg=yes +else + je_cv_format_arg=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_arg" >&5 +$as_echo "$je_cv_format_arg" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +if test "x${je_cv_format_arg}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_ARG " >>confdefs.h + +fi + + +# Check whether --with-rpath was given. +if test "${with_rpath+set}" = set; then : + withval=$with_rpath; if test "x$with_rpath" = "xno" ; then + RPATH_EXTRA= +else + RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" +fi +else + RPATH_EXTRA= + +fi + + + +# Check whether --enable-autogen was given. +if test "${enable_autogen+set}" = set; then : + enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then + enable_autogen="0" +else + enable_autogen="1" +fi + +else + enable_autogen="0" + +fi + + + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. 
+case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. 
+set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +# Extract the first word of "ld", so it can be a program name with args. +set dummy ld; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $LD in + [\\/]* | ?:[\\/]*) + ac_cv_path_LD="$LD" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_LD" && ac_cv_path_LD="false" + ;; +esac +fi +LD=$ac_cv_path_LD +if test -n "$LD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "autoconf", so it can be a program name with args. +set dummy autoconf; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_AUTOCONF+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $AUTOCONF in + [\\/]* | ?:[\\/]*) + ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false" + ;; +esac +fi +AUTOCONF=$ac_cv_path_AUTOCONF +if test -n "$AUTOCONF"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5 +$as_echo "$AUTOCONF" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + +# Check whether --enable-doc was given. +if test "${enable_doc+set}" = set; then : + enableval=$enable_doc; if test "x$enable_doc" = "xno" ; then + enable_doc="0" +else + enable_doc="1" +fi + +else + enable_doc="1" + +fi + + + +# Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; if test "x$enable_shared" = "xno" ; then + enable_shared="0" +else + enable_shared="1" +fi + +else + enable_shared="1" + +fi + + + +# Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; if test "x$enable_static" = "xno" ; then + enable_static="0" +else + enable_static="1" +fi + +else + enable_static="1" + +fi + + + +if test "$enable_shared$enable_static" = "00" ; then + as_fn_error $? "Please enable one of shared or static builds" "$LINENO" 5 +fi + + +# Check whether --with-mangling was given. +if test "${with_mangling+set}" = set; then : + withval=$with_mangling; mangling_map="$with_mangling" +else + mangling_map="" +fi + + + +# Check whether --with-jemalloc_prefix was given. +if test "${with_jemalloc_prefix+set}" = set; then : + withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix" +else + if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then + JEMALLOC_PREFIX="" +else + JEMALLOC_PREFIX="je_" +fi + +fi + +if test "x$JEMALLOC_PREFIX" = "x" ; then + $as_echo "#define JEMALLOC_IS_MALLOC 1" >>confdefs.h + +else + JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_PREFIX "$JEMALLOC_PREFIX" +_ACEOF + + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX" +_ACEOF + +fi + + + + +# Check whether --with-export was given. 
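+# Illustrative sketch: with the prefix handling above, a Linux (ELF) build
+# normally ends up with an empty JEMALLOC_PREFIX and so defines
+# JEMALLOC_IS_MALLOC, whereas configuring a prefix such as "je_" (the
+# macho/pecoff default) would instead append to confdefs.h roughly:
+#   #define JEMALLOC_PREFIX "je_"
+#   #define JEMALLOC_CPREFIX "JE_"
+# where JEMALLOC_CPREFIX is simply the prefix upper-cased via tr "a-z" "A-Z".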
+if test "${with_export+set}" = set; then : + withval=$with_export; if test "x$with_export" = "xno"; then + $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h + +fi + +fi + + +public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" +ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign" +if test "x$ac_cv_func_memalign" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN " >>confdefs.h + + public_syms="${public_syms} memalign" +fi + +ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc" +if test "x$ac_cv_func_valloc" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE_VALLOC " >>confdefs.h + + public_syms="${public_syms} valloc" +fi + + +wrap_syms= +if test "x${JEMALLOC_PREFIX}" = "x" ; then + ac_fn_c_check_func "$LINENO" "__libc_calloc" "ac_cv_func___libc_calloc" +if test "x$ac_cv_func___libc_calloc" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_CALLOC " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_calloc" +fi + + ac_fn_c_check_func "$LINENO" "__libc_free" "ac_cv_func___libc_free" +if test "x$ac_cv_func___libc_free" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_FREE " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_free" +fi + + ac_fn_c_check_func "$LINENO" "__libc_malloc" "ac_cv_func___libc_malloc" +if test "x$ac_cv_func___libc_malloc" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MALLOC " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_malloc" +fi + + ac_fn_c_check_func "$LINENO" "__libc_memalign" "ac_cv_func___libc_memalign" +if test "x$ac_cv_func___libc_memalign" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_memalign" +fi + + ac_fn_c_check_func "$LINENO" "__libc_realloc" "ac_cv_func___libc_realloc" +if test "x$ac_cv_func___libc_realloc" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_REALLOC " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_realloc" +fi + + ac_fn_c_check_func "$LINENO" "__libc_valloc" "ac_cv_func___libc_valloc" +if test "x$ac_cv_func___libc_valloc" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___LIBC_VALLOC " >>confdefs.h + + wrap_syms="${wrap_syms} __libc_valloc" +fi + + ac_fn_c_check_func "$LINENO" "__posix_memalign" "ac_cv_func___posix_memalign" +if test "x$ac_cv_func___posix_memalign" = xyes; then : + $as_echo "#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN " >>confdefs.h + + wrap_syms="${wrap_syms} __posix_memalign" +fi + +fi + +case "${host}" in + *-*-mingw* | *-*-cygwin*) + wrap_syms="${wrap_syms} tls_callback" + ;; + *) + ;; +esac + + +# Check whether --with-private_namespace was given. +if test "${with_private_namespace+set}" = set; then : + withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_" +else + JEMALLOC_PRIVATE_NAMESPACE="je_" + +fi + +cat >>confdefs.h <<_ACEOF +#define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE +_ACEOF + +private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" + + + +# Check whether --with-install_suffix was given. +if test "${with_install_suffix+set}" = set; then : + withval=$with_install_suffix; INSTALL_SUFFIX="$with_install_suffix" +else + INSTALL_SUFFIX= + +fi + +install_suffix="$INSTALL_SUFFIX" + + + +# Check whether --with-malloc_conf was given. 
+if test "${with_malloc_conf+set}" = set; then : + withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf" +else + JEMALLOC_CONFIG_MALLOC_CONF="" + +fi + +config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" +cat >>confdefs.h <<_ACEOF +#define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf" +_ACEOF + + +je_="je_" + + +cfgoutputs_in="Makefile.in" +cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" +cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" +cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" +cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" + +cfgoutputs_out="Makefile" +cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" +cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" +cfgoutputs_out="${cfgoutputs_out} test/test.sh" +cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" + +cfgoutputs_tup="Makefile" +cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" +cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" +cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" + +cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" +cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" + +cfghdrs_out="include/jemalloc/jemalloc_defs.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk" +cfghdrs_out="${cfghdrs_out} 
include/jemalloc/internal/public_symbols.txt" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" +cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" + +cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" + + +# Check whether --enable-debug was given. +if test "${enable_debug+set}" = set; then : + enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then + enable_debug="0" +else + enable_debug="1" +fi + +else + enable_debug="0" + +fi + +if test "x$enable_debug" = "x1" ; then + $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h + +fi +if test "x$enable_debug" = "x1" ; then + $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h + +fi + + +if test "x$enable_debug" = "x0" ; then + if test "x$GCC" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 +$as_echo_n "checking whether compiler supports -O3... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-O3 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-O3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5 +$as_echo_n "checking whether compiler supports -O3... 
" >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-O3 + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-O3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5 +$as_echo_n "checking whether compiler supports -funroll-loops... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-funroll-loops + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-funroll-loops + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + elif test "x$je_cv_msvc" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 +$as_echo_n "checking whether compiler supports -O2... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-O2 + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-O2 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5 +$as_echo_n "checking whether compiler supports -O2... " >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-O2 + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-O2 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + else + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 +$as_echo_n "checking whether compiler supports -O... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-O + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-O + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5 +$as_echo_n "checking whether compiler supports -O... " >&6; } +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +T_APPEND_V=-O + if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}" +else + CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + je_cv_cxxflags_added=-O + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cxxflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then + CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}" +else + CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}" +fi + + + fi +fi + +# Check whether --enable-stats was given. +if test "${enable_stats+set}" = set; then : + enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then + enable_stats="0" +else + enable_stats="1" +fi + +else + enable_stats="1" + +fi + +if test "x$enable_stats" = "x1" ; then + $as_echo "#define JEMALLOC_STATS " >>confdefs.h + +fi + + +# Check whether --enable-experimental_smallocx was given. 
+if test "${enable_experimental_smallocx+set}" = set; then : + enableval=$enable_experimental_smallocx; if test "x$enable_experimental_smallocx" = "xno" ; then +enable_experimental_smallocx="0" +else +enable_experimental_smallocx="1" +fi + +else + enable_experimental_smallocx="0" + +fi + +if test "x$enable_experimental_smallocx" = "x1" ; then + $as_echo "#define JEMALLOC_EXPERIMENTAL_SMALLOCX_API 1" >>confdefs.h + +fi + + +# Check whether --enable-prof was given. +if test "${enable_prof+set}" = set; then : + enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then + enable_prof="0" +else + enable_prof="1" +fi + +else + enable_prof="0" + +fi + +if test "x$enable_prof" = "x1" ; then + backtrace_method="" +else + backtrace_method="N/A" +fi + +# Check whether --enable-prof-libunwind was given. +if test "${enable_prof_libunwind+set}" = set; then : + enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then + enable_prof_libunwind="0" +else + enable_prof_libunwind="1" +fi + +else + enable_prof_libunwind="0" + +fi + + +# Check whether --with-static_libunwind was given. +if test "${with_static_libunwind+set}" = set; then : + withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then + LUNWIND="-lunwind" +else + if test ! -f "$with_static_libunwind" ; then + as_fn_error $? "Static libunwind not found: $with_static_libunwind" "$LINENO" 5 + fi + LUNWIND="$with_static_libunwind" +fi +else + LUNWIND="-lunwind" + +fi + +if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then + for ac_header in libunwind.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default" +if test "x$ac_cv_header_libunwind_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBUNWIND_H 1 +_ACEOF + +else + enable_prof_libunwind="0" +fi + +done + + if test "x$LUNWIND" = "x-lunwind" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5 +$as_echo_n "checking for unw_backtrace in -lunwind... " >&6; } +if ${ac_cv_lib_unwind_unw_backtrace+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lunwind $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char unw_backtrace (); +int +main () +{ +return unw_backtrace (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_unwind_unw_backtrace=yes +else + ac_cv_lib_unwind_unw_backtrace=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5 +$as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; } +if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then : + T_APPEND_V=$LUNWIND + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + +else + enable_prof_libunwind="0" +fi + + else + T_APPEND_V=$LUNWIND + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + + fi + if test "x${enable_prof_libunwind}" = "x1" ; then + backtrace_method="libunwind" + $as_echo "#define JEMALLOC_PROF_LIBUNWIND " >>confdefs.h + + fi +fi + +# Check whether --enable-prof-libgcc was given. +if test "${enable_prof_libgcc+set}" = set; then : + enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then + enable_prof_libgcc="0" +else + enable_prof_libgcc="1" +fi + +else + enable_prof_libgcc="1" + +fi + +if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + for ac_header in unwind.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" +if test "x$ac_cv_header_unwind_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_UNWIND_H 1 +_ACEOF + +else + enable_prof_libgcc="0" +fi + +done + + if test "x${enable_prof_libgcc}" = "x1" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5 +$as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; } +if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lgcc $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char _Unwind_Backtrace (); +int +main () +{ +return _Unwind_Backtrace (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_gcc__Unwind_Backtrace=yes +else + ac_cv_lib_gcc__Unwind_Backtrace=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5 +$as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; } +if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then : + T_APPEND_V=-lgcc + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + +else + enable_prof_libgcc="0" +fi + + fi + if test "x${enable_prof_libgcc}" = "x1" ; then + backtrace_method="libgcc" + $as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h + + fi +else + enable_prof_libgcc="0" +fi + +# Check whether --enable-prof-gcc was given. 
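+# The two link probes above follow the usual AC_CHECK_LIB pattern: a dummy
+# "char unw_backtrace ();" / "char _Unwind_Backtrace ();" prototype is linked
+# against -lunwind or -lgcc, and only if that link succeeds is the library
+# appended to LIBS and JEMALLOC_PROF_LIBUNWIND or JEMALLOC_PROF_LIBGCC
+# defined; otherwise the corresponding enable_prof_lib* switch is forced
+# back to "0".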
+if test "${enable_prof_gcc+set}" = set; then : + enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then + enable_prof_gcc="0" +else + enable_prof_gcc="1" +fi + +else + enable_prof_gcc="1" + +fi + +if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5 +$as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-fno-omit-frame-pointer + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-fno-omit-frame-pointer + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + backtrace_method="gcc intrinsics" + $as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h + +else + enable_prof_gcc="0" +fi + +if test "x$backtrace_method" = "x" ; then + backtrace_method="none (disabling profiling)" + enable_prof="0" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5 +$as_echo_n "checking configured backtracing method... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 +$as_echo "$backtrace_method" >&6; } +if test "x$enable_prof" = "x1" ; then + T_APPEND_V=$LM + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + + + $as_echo "#define JEMALLOC_PROF " >>confdefs.h + +fi + + +if test "x${maps_coalesce}" = "x1" ; then + $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h + +fi + +if test "x$default_retain" = "x1" ; then + $as_echo "#define JEMALLOC_RETAIN " >>confdefs.h + +fi + +have_dss="1" +ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk" +if test "x$ac_cv_func_sbrk" = xyes; then : + have_sbrk="1" +else + have_sbrk="0" +fi + +if test "x$have_sbrk" = "x1" ; then + if test "x$sbrk_deprecated" = "x1" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5 +$as_echo "Disabling dss allocation because sbrk is deprecated" >&6; } + have_dss="0" + fi +else + have_dss="0" +fi + +if test "x$have_dss" = "x1" ; then + $as_echo "#define JEMALLOC_DSS " >>confdefs.h + +fi + +# Check whether --enable-fill was given. 
+if test "${enable_fill+set}" = set; then : + enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then + enable_fill="0" +else + enable_fill="1" +fi + +else + enable_fill="1" + +fi + +if test "x$enable_fill" = "x1" ; then + $as_echo "#define JEMALLOC_FILL " >>confdefs.h + +fi + + +# Check whether --enable-utrace was given. +if test "${enable_utrace+set}" = set; then : + enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then + enable_utrace="0" +else + enable_utrace="1" +fi + +else + enable_utrace="0" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5 +$as_echo_n "checking whether utrace(2) is compilable... " >&6; } +if ${je_cv_utrace+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#include +#include +#include + +int +main () +{ + + utrace((void *)0, 0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_utrace=yes +else + je_cv_utrace=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5 +$as_echo "$je_cv_utrace" >&6; } + +if test "x${je_cv_utrace}" = "xno" ; then + enable_utrace="0" +fi +if test "x$enable_utrace" = "x1" ; then + $as_echo "#define JEMALLOC_UTRACE " >>confdefs.h + +fi + + +# Check whether --enable-xmalloc was given. +if test "${enable_xmalloc+set}" = set; then : + enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then + enable_xmalloc="0" +else + enable_xmalloc="1" +fi + +else + enable_xmalloc="0" + +fi + +if test "x$enable_xmalloc" = "x1" ; then + $as_echo "#define JEMALLOC_XMALLOC " >>confdefs.h + +fi + + +# Check whether --enable-cache-oblivious was given. +if test "${enable_cache_oblivious+set}" = set; then : + enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then + enable_cache_oblivious="0" +else + enable_cache_oblivious="1" +fi + +else + enable_cache_oblivious="1" + +fi + +if test "x$enable_cache_oblivious" = "x1" ; then + $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h + +fi + + +# Check whether --enable-log was given. +if test "${enable_log+set}" = set; then : + enableval=$enable_log; if test "x$enable_log" = "xno" ; then + enable_log="0" +else + enable_log="1" +fi + +else + enable_log="0" + +fi + +if test "x$enable_log" = "x1" ; then + $as_echo "#define JEMALLOC_LOG " >>confdefs.h + +fi + + +# Check whether --enable-readlinkat was given. +if test "${enable_readlinkat+set}" = set; then : + enableval=$enable_readlinkat; if test "x$enable_readlinkat" = "xno" ; then + enable_readlinkat="0" +else + enable_readlinkat="1" +fi + +else + enable_readlinkat="0" + +fi + +if test "x$enable_readlinkat" = "x1" ; then + $as_echo "#define JEMALLOC_READLINKAT " >>confdefs.h + +fi + + +# Check whether --enable-opt-safety-checks was given. +if test "${enable_opt_safety_checks+set}" = set; then : + enableval=$enable_opt_safety_checks; if test "x$enable_opt_safety_checks" = "xno" ; then + enable_opt_safety_checks="0" +else + enable_opt_safety_checks="1" +fi + +else + enable_opt_safety_checks="0" + +fi + +if test "x$enable_opt_safety_checks" = "x1" ; then + $as_echo "#define JEMALLOC_OPT_SAFETY_CHECKS " >>confdefs.h + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5 +$as_echo_n "checking whether a program using __builtin_unreachable is compilable... 
" >&6; } +if ${je_cv_gcc_builtin_unreachable+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +void foo (void) { + __builtin_unreachable(); +} + +int +main () +{ + + { + foo(); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_builtin_unreachable=yes +else + je_cv_gcc_builtin_unreachable=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5 +$as_echo "$je_cv_gcc_builtin_unreachable" >&6; } + +if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then + $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h + +else + $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5 +$as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; } +if ${je_cv_gcc_builtin_ffsl+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#include + +int +main () +{ + + { + int rv = __builtin_ffsl(0x08); + printf("%d\n", rv); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_builtin_ffsl=yes +else + je_cv_gcc_builtin_ffsl=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5 +$as_echo "$je_cv_gcc_builtin_ffsl" >&6; } + +if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then + $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h + + $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h + + $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h + +else + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5 +$as_echo_n "checking whether a program using ffsl is compilable... " >&6; } +if ${je_cv_function_ffsl+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include + #include + +int +main () +{ + + { + int rv = ffsl(0x08); + printf("%d\n", rv); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_function_ffsl=yes +else + je_cv_function_ffsl=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5 +$as_echo "$je_cv_function_ffsl" >&6; } + + if test "x${je_cv_function_ffsl}" = "xyes" ; then + $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h + + $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h + + $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h + + else + as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5 + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_popcountl is compilable" >&5 +$as_echo_n "checking whether a program using __builtin_popcountl is compilable... " >&6; } +if ${je_cv_gcc_builtin_popcountl+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include +#include +#include + +int +main () +{ + + { + int rv = __builtin_popcountl(0x08); + printf("%d\n", rv); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_builtin_popcountl=yes +else + je_cv_gcc_builtin_popcountl=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_popcountl" >&5 +$as_echo "$je_cv_gcc_builtin_popcountl" >&6; } + +if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then + $as_echo "#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount" >>confdefs.h + + $as_echo "#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl" >>confdefs.h + +fi + + +# Check whether --with-lg_quantum was given. +if test "${with_lg_quantum+set}" = set; then : + withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum" +else + LG_QUANTA="3 4" +fi + +if test "x$with_lg_quantum" != "x" ; then + cat >>confdefs.h <<_ACEOF +#define LG_QUANTUM $with_lg_quantum +_ACEOF + +fi + + +# Check whether --with-lg_page was given. +if test "${with_lg_page+set}" = set; then : + withval=$with_lg_page; LG_PAGE="$with_lg_page" +else + LG_PAGE="detect" +fi + +if test "x$LG_PAGE" = "xdetect"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5 +$as_echo_n "checking LG_PAGE... " >&6; } +if ${je_cv_lg_page+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + je_cv_lg_page=12 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#ifdef _WIN32 +#include +#else +#include +#endif +#include + +int +main () +{ + + int result; + FILE *f; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwPageSize; +#else + result = sysconf(_SC_PAGESIZE); +#endif + if (result == -1) { + return 1; + } + result = JEMALLOC_INTERNAL_FFSL(result) - 1; + + f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + fprintf(f, "%d", result); + fclose(f); + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + je_cv_lg_page=`cat conftest.out` +else + je_cv_lg_page=undefined +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5 +$as_echo "$je_cv_lg_page" >&6; } +fi +if test "x${je_cv_lg_page}" != "x" ; then + LG_PAGE="${je_cv_lg_page}" +fi +if test "x${LG_PAGE}" != "xundefined" ; then + cat >>confdefs.h <<_ACEOF +#define LG_PAGE $LG_PAGE +_ACEOF + +else + as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5 +fi + + +# Check whether --with-lg_hugepage was given. +if test "${with_lg_hugepage+set}" = set; then : + withval=$with_lg_hugepage; je_cv_lg_hugepage="${with_lg_hugepage}" +else + je_cv_lg_hugepage="" +fi + +if test "x${je_cv_lg_hugepage}" = "x" ; then + if test -e "/proc/meminfo" ; then + hpsk=`cat /proc/meminfo 2>/dev/null | \ + grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ + awk '{print $2}'` + if test "x${hpsk}" != "x" ; then + je_cv_lg_hugepage=10 + while test "${hpsk}" -gt 1 ; do + hpsk="$((hpsk / 2))" + je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" + done + fi + fi + + if test "x${je_cv_lg_hugepage}" = "x" ; then + je_cv_lg_hugepage=21 + fi +fi +if test "x${LG_PAGE}" != "xundefined" -a \ + "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then + as_fn_error $? 
"Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})" "$LINENO" 5 +fi +cat >>confdefs.h <<_ACEOF +#define LG_HUGEPAGE ${je_cv_lg_hugepage} +_ACEOF + + +# Check whether --enable-libdl was given. +if test "${enable_libdl+set}" = set; then : + enableval=$enable_libdl; if test "x$enable_libdl" = "xno" ; then + enable_libdl="0" +else + enable_libdl="1" +fi + +else + enable_libdl="1" + +fi + + + + +if test "x$abi" != "xpecoff" ; then + $as_echo "#define JEMALLOC_HAVE_PTHREAD " >>confdefs.h + + for ac_header in pthread.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" +if test "x$ac_cv_header_pthread_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_PTHREAD_H 1 +_ACEOF + +else + as_fn_error $? "pthread.h is missing" "$LINENO" 5 +fi + +done + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + T_APPEND_V=-pthread + if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + LIBS="${LIBS}${T_APPEND_V}" +else + LIBS="${LIBS} ${T_APPEND_V}" +fi + + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 +$as_echo_n "checking for library containing pthread_create... " >&6; } +if ${ac_cv_search_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +for ac_lib in '' ; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_pthread_create=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_pthread_create+:} false; then : + break +fi +done +if ${ac_cv_search_pthread_create+:} false; then : + +else + ac_cv_search_pthread_create=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 +$as_echo "$ac_cv_search_pthread_create" >&6; } +ac_res=$ac_cv_search_pthread_create +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +else + as_fn_error $? "libpthread is missing" "$LINENO" 5 +fi + +fi + + wrap_syms="${wrap_syms} pthread_create" + have_pthread="1" + + if test "x$enable_libdl" = "x1" ; then + have_dlsym="1" + for ac_header in dlfcn.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym" +if test "x$ac_cv_func_dlsym" = xyes; then : + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5 +$as_echo_n "checking for dlsym in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlsym+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlsym (); +int +main () +{ +return dlsym (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlsym=yes +else + ac_cv_lib_dl_dlsym=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5 +$as_echo "$ac_cv_lib_dl_dlsym" >&6; } +if test "x$ac_cv_lib_dl_dlsym" = xyes; then : + LIBS="$LIBS -ldl" +else + have_dlsym="0" +fi + +fi + +else + have_dlsym="0" +fi + +done + + if test "x$have_dlsym" = "x1" ; then + $as_echo "#define JEMALLOC_HAVE_DLSYM " >>confdefs.h + + fi + else + have_dlsym="0" + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5 +$as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; } +if ${je_cv_pthread_atfork+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + pthread_atfork((void *)0, (void *)0, (void *)0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pthread_atfork=yes +else + je_cv_pthread_atfork=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5 +$as_echo "$je_cv_pthread_atfork" >&6; } + + if test "x${je_cv_pthread_atfork}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK " >>confdefs.h + + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_setname_np(3) is compilable" >&5 +$as_echo_n "checking whether pthread_setname_np(3) is compilable... " >&6; } +if ${je_cv_pthread_setname_np+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + pthread_setname_np(pthread_self(), "setname_test"); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pthread_setname_np=yes +else + je_cv_pthread_setname_np=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_setname_np" >&5 +$as_echo "$je_cv_pthread_setname_np" >&6; } + + if test "x${je_cv_pthread_setname_np}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP " >>confdefs.h + + fi +fi + +T_APPEND_V=-D_REENTRANT + if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CPPFLAGS="${CPPFLAGS}${T_APPEND_V}" +else + CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 +$as_echo_n "checking for library containing clock_gettime... " >&6; } +if ${ac_cv_search_clock_gettime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char clock_gettime (); +int +main () +{ +return clock_gettime (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_clock_gettime=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_clock_gettime+:} false; then : + break +fi +done +if ${ac_cv_search_clock_gettime+:} false; then : + +else + ac_cv_search_clock_gettime=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 +$as_echo "$ac_cv_search_clock_gettime" >&6; } +ac_res=$ac_cv_search_clock_gettime +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + if test "$ac_cv_search_clock_gettime" != "-lrt"; then + SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + + unset ac_cv_search_clock_gettime + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5 +$as_echo_n "checking whether compiler supports -dynamic... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-dynamic + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-dynamic + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 +$as_echo_n "checking for library containing clock_gettime... " >&6; } +if ${ac_cv_search_clock_gettime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char clock_gettime (); +int +main () +{ +return clock_gettime (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_clock_gettime=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_clock_gettime+:} false; then : + break +fi +done +if ${ac_cv_search_clock_gettime+:} false; then : + +else + ac_cv_search_clock_gettime=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 +$as_echo "$ac_cv_search_clock_gettime" >&6; } +ac_res=$ac_cv_search_clock_gettime +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + + CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5 +$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; } +if ${je_cv_clock_monotonic_coarse+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_clock_monotonic_coarse=yes +else + je_cv_clock_monotonic_coarse=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5 +$as_echo "$je_cv_clock_monotonic_coarse" >&6; } + +if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5 +$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; } +if ${je_cv_clock_monotonic+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +int +main () +{ + + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); +#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 +# error _POSIX_MONOTONIC_CLOCK missing/invalid +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_clock_monotonic=yes +else + je_cv_clock_monotonic=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5 +$as_echo "$je_cv_clock_monotonic" >&6; } + +if test "x${je_cv_clock_monotonic}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5 +$as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; } +if ${je_cv_mach_absolute_time+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + mach_absolute_time(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_mach_absolute_time=yes +else + je_cv_mach_absolute_time=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5 +$as_echo "$je_cv_mach_absolute_time" >&6; } + +if test "x${je_cv_mach_absolute_time}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h + +fi + +# Check whether --enable-syscall was given. +if test "${enable_syscall+set}" = set; then : + enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then + enable_syscall="0" +else + enable_syscall="1" +fi + +else + enable_syscall="1" + +fi + +if test "x$enable_syscall" = "x1" ; then + SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... 
" >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5 +$as_echo_n "checking whether syscall(2) is compilable... " >&6; } +if ${je_cv_syscall+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +int +main () +{ + + syscall(SYS_write, 2, "hello", 5); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_syscall=yes +else + je_cv_syscall=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5 +$as_echo "$je_cv_syscall" >&6; } + + CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + if test "x$je_cv_syscall" = "xyes" ; then + $as_echo "#define JEMALLOC_USE_SYSCALL " >>confdefs.h + + fi +fi + +ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv" +if test "x$ac_cv_func_secure_getenv" = xyes; then : + have_secure_getenv="1" +else + have_secure_getenv="0" + +fi + +if test "x$have_secure_getenv" = "x1" ; then + $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "sched_getcpu" "ac_cv_func_sched_getcpu" +if test "x$ac_cv_func_sched_getcpu" = xyes; then : + have_sched_getcpu="1" +else + have_sched_getcpu="0" + +fi + +if test "x$have_sched_getcpu" = "x1" ; then + $as_echo "#define JEMALLOC_HAVE_SCHED_GETCPU " >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "sched_setaffinity" "ac_cv_func_sched_setaffinity" +if test "x$ac_cv_func_sched_setaffinity" = xyes; then : + have_sched_setaffinity="1" +else + have_sched_setaffinity="0" + +fi + +if test "x$have_sched_setaffinity" = "x1" ; then + $as_echo "#define JEMALLOC_HAVE_SCHED_SETAFFINITY " >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid" +if test "x$ac_cv_func_issetugid" = xyes; then : + have_issetugid="1" +else + have_issetugid="0" + +fi + +if test "x$have_issetugid" = "x1" ; then + $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h + +fi + +ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup" +if test 
"x$ac_cv_func__malloc_thread_cleanup" = xyes; then : + have__malloc_thread_cleanup="1" +else + have__malloc_thread_cleanup="0" + +fi + +if test "x$have__malloc_thread_cleanup" = "x1" ; then + $as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP " >>confdefs.h + + wrap_syms="${wrap_syms} _malloc_thread_cleanup" + force_tls="1" +fi + +ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb" +if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then : + have__pthread_mutex_init_calloc_cb="1" +else + have__pthread_mutex_init_calloc_cb="0" + +fi + +if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then + $as_echo "#define JEMALLOC_MUTEX_INIT_CB 1" >>confdefs.h + + wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork" +fi + +# Check whether --enable-lazy_lock was given. +if test "${enable_lazy_lock+set}" = set; then : + enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then + enable_lazy_lock="0" +else + enable_lazy_lock="1" +fi + +else + enable_lazy_lock="" + +fi + +if test "x${enable_lazy_lock}" = "x" ; then + if test "x${force_lazy_lock}" = "x1" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 +$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } + enable_lazy_lock="1" + else + enable_lazy_lock="0" + fi +fi +if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5 +$as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; } + enable_lazy_lock="0" +fi +if test "x$enable_lazy_lock" = "x1" ; then + if test "x$have_dlsym" = "x1" ; then + $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h + + else + as_fn_error $? "Missing dlsym support: lazy-lock cannot be enabled." "$LINENO" 5 + fi +fi + + +if test "x${force_tls}" = "x1" ; then + enable_tls="1" +elif test "x${force_tls}" = "x0" ; then + enable_tls="0" +else + enable_tls="1" +fi +if test "x${enable_tls}" = "x1" ; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5 +$as_echo_n "checking for TLS... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + __thread int x; + +int +main () +{ + + x = 42; + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + enable_tls="0" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + enable_tls="0" +fi + +if test "x${enable_tls}" = "x1" ; then + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_TLS +_ACEOF + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5 +$as_echo_n "checking whether C11 atomics is compilable... " >&6; } +if ${je_cv_c11_atomics+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include +#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) +#include +#else +#error Atomics not available +#endif + +int +main () +{ + + uint64_t *p = (uint64_t *)0; + uint64_t x = 1; + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + uint64_t r = atomic_fetch_add(a, x) + x; + return r == 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_c11_atomics=yes +else + je_cv_c11_atomics=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11_atomics" >&5 +$as_echo "$je_cv_c11_atomics" >&6; } + +if test "x${je_cv_c11_atomics}" = "xyes" ; then + $as_echo "#define JEMALLOC_C11_ATOMICS 1" >>confdefs.h + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __atomic atomics is compilable" >&5 +$as_echo_n "checking whether GCC __atomic atomics is compilable... " >&6; } +if ${je_cv_gcc_atomic_atomics+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + int x = 0; + int val = 1; + int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); + int after_add = x; + return after_add == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_atomic_atomics=yes +else + je_cv_gcc_atomic_atomics=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_atomic_atomics" >&5 +$as_echo "$je_cv_gcc_atomic_atomics" >&6; } + +if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then + $as_echo "#define JEMALLOC_GCC_ATOMIC_ATOMICS 1" >>confdefs.h + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __atomic atomics is compilable" >&5 +$as_echo_n "checking whether GCC 8-bit __atomic atomics is compilable... " >&6; } +if ${je_cv_gcc_u8_atomic_atomics+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + unsigned char x = 0; + int val = 1; + int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); + int after_add = (int)x; + return after_add == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_u8_atomic_atomics=yes +else + je_cv_gcc_u8_atomic_atomics=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_atomic_atomics" >&5 +$as_echo "$je_cv_gcc_u8_atomic_atomics" >&6; } + + if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then + $as_echo "#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1" >>confdefs.h + + fi +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __sync atomics is compilable" >&5 +$as_echo_n "checking whether GCC __sync atomics is compilable... " >&6; } +if ${je_cv_gcc_sync_atomics+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + int x = 0; + int before_add = __sync_fetch_and_add(&x, 1); + int after_add = x; + return (before_add == 0) && (after_add == 1); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_sync_atomics=yes +else + je_cv_gcc_sync_atomics=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_sync_atomics" >&5 +$as_echo "$je_cv_gcc_sync_atomics" >&6; } + +if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then + $as_echo "#define JEMALLOC_GCC_SYNC_ATOMICS 1" >>confdefs.h + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __sync atomics is compilable" >&5 +$as_echo_n "checking whether GCC 8-bit __sync atomics is compilable... " >&6; } +if ${je_cv_gcc_u8_sync_atomics+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + unsigned char x = 0; + int before_add = __sync_fetch_and_add(&x, 1); + int after_add = (int)x; + return (before_add == 0) && (after_add == 1); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_u8_sync_atomics=yes +else + je_cv_gcc_u8_sync_atomics=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_sync_atomics" >&5 +$as_echo "$je_cv_gcc_u8_sync_atomics" >&6; } + + if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then + $as_echo "#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1" >>confdefs.h + + fi +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5 +$as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; } +if ${je_cv_osatomic+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +int +main () +{ + + { + int32_t x32 = 0; + volatile int32_t *x32p = &x32; + OSAtomicAdd32(1, x32p); + } + { + int64_t x64 = 0; + volatile int64_t *x64p = &x64; + OSAtomicAdd64(1, x64p); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_osatomic=yes +else + je_cv_osatomic=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5 +$as_echo "$je_cv_osatomic" >&6; } + +if test "x${je_cv_osatomic}" = "xyes" ; then + $as_echo "#define JEMALLOC_OSATOMIC " >>confdefs.h + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5 +$as_echo_n "checking whether madvise(2) is compilable... " >&6; } +if ${je_cv_madvise+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, 0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madvise=yes +else + je_cv_madvise=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5 +$as_echo "$je_cv_madvise" >&6; } + +if test "x${je_cv_madvise}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_FREE) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_FREE) is compilable... 
" >&6; } +if ${je_cv_madv_free+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_FREE); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madv_free=yes +else + je_cv_madv_free=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5 +$as_echo "$je_cv_madv_free" >&6; } + + if test "x${je_cv_madv_free}" = "xyes" ; then + $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h + + elif test "x${je_cv_madvise}" = "xyes" ; then + case "${host_cpu}" in i686|x86_64) + case "${host}" in *-*-linux*) + $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h + + $as_echo "#define JEMALLOC_DEFINE_MADVISE_FREE " >>confdefs.h + + ;; + esac + ;; + esac + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; } +if ${je_cv_madv_dontneed+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_DONTNEED); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madv_dontneed=yes +else + je_cv_madv_dontneed=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5 +$as_echo "$je_cv_madv_dontneed" >&6; } + + if test "x${je_cv_madv_dontneed}" = "xyes" ; then + $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h + + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DO[NT]DUMP) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_DO[NT]DUMP) is compilable... " >&6; } +if ${je_cv_madv_dontdump+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_DONTDUMP); + madvise((void *)0, 0, MADV_DODUMP); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_madv_dontdump=yes +else + je_cv_madv_dontdump=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontdump" >&5 +$as_echo "$je_cv_madv_dontdump" >&6; } + + if test "x${je_cv_madv_dontdump}" = "xyes" ; then + $as_echo "#define JEMALLOC_MADVISE_DONTDUMP " >>confdefs.h + + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5 +$as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; } +if ${je_cv_thp+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + madvise((void *)0, 0, MADV_HUGEPAGE); + madvise((void *)0, 0, MADV_NOHUGEPAGE); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_thp=yes +else + je_cv_thp=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5 +$as_echo "$je_cv_thp" >&6; } + +case "${host_cpu}" in + arm*) + ;; + *) + if test "x${je_cv_thp}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_MADVISE_HUGE " >>confdefs.h + + fi + ;; +esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5 +$as_echo_n "checking for __builtin_clz... " >&6; } +if ${je_cv_builtin_clz+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + { + unsigned x = 0; + int y = __builtin_clz(x); + } + { + unsigned long x = 0; + int y = __builtin_clzl(x); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_builtin_clz=yes +else + je_cv_builtin_clz=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5 +$as_echo "$je_cv_builtin_clz" >&6; } + +if test "x${je_cv_builtin_clz}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5 +$as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; } +if ${je_cv_os_unfair_lock+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +int +main () +{ + + #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 + #error "os_unfair_lock is not supported" + #else + os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; + os_unfair_lock_lock(&lock); + os_unfair_lock_unlock(&lock); + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_os_unfair_lock=yes +else + je_cv_os_unfair_lock=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5 +$as_echo "$je_cv_os_unfair_lock" >&6; } + +if test "x${je_cv_os_unfair_lock}" = "xyes" ; then + $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h + +fi + + +# Check whether --enable-zone-allocator was given. +if test "${enable_zone_allocator+set}" = set; then : + enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then + enable_zone_allocator="0" +else + enable_zone_allocator="1" +fi + +else + if test "x${abi}" = "xmacho"; then + enable_zone_allocator="1" +fi + + +fi + + + +if test "x${enable_zone_allocator}" = "x1" ; then + if test "x${abi}" != "xmacho"; then + as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5 + fi + $as_echo "#define JEMALLOC_ZONE " >>confdefs.h + +fi + +# Check whether --enable-initial-exec-tls was given. 
+if test "${enable_initial_exec_tls+set}" = set; then : + enableval=$enable_initial_exec_tls; if test "x$enable_initial_exec_tls" = "xno" ; then + enable_initial_exec_tls="0" +else + enable_initial_exec_tls="1" +fi + +else + enable_initial_exec_tls="1" + +fi + + + +if test "x${je_cv_tls_model}" = "xyes" -a \ + "x${enable_initial_exec_tls}" = "x1" ; then + $as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h + +else + $as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h + +fi + + +if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" ; then + $as_echo "#define JEMALLOC_BACKGROUND_THREAD 1" >>confdefs.h + +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5 +$as_echo_n "checking whether glibc malloc hook is compilable... " >&6; } +if ${je_cv_glibc_malloc_hook+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +extern void (* __free_hook)(void *ptr); +extern void *(* __malloc_hook)(size_t size); +extern void *(* __realloc_hook)(void *ptr, size_t size); + +int +main () +{ + + void *ptr = 0L; + if (__malloc_hook) ptr = __malloc_hook(1); + if (__realloc_hook) ptr = __realloc_hook(ptr, 2); + if (__free_hook && ptr) __free_hook(ptr); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_glibc_malloc_hook=yes +else + je_cv_glibc_malloc_hook=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5 +$as_echo "$je_cv_glibc_malloc_hook" >&6; } + +if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then + if test "x${JEMALLOC_PREFIX}" = "x" ; then + $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h + + wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5 +$as_echo_n "checking whether glibc memalign hook is compilable... " >&6; } +if ${je_cv_glibc_memalign_hook+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +extern void *(* __memalign_hook)(size_t alignment, size_t size); + +int +main () +{ + + void *ptr = 0L; + if (__memalign_hook) ptr = __memalign_hook(16, 7); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_glibc_memalign_hook=yes +else + je_cv_glibc_memalign_hook=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5 +$as_echo "$je_cv_glibc_memalign_hook" >&6; } + +if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then + if test "x${JEMALLOC_PREFIX}" = "x" ; then + $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h + + wrap_syms="${wrap_syms} __memalign_hook" + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5 +$as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; } +if ${je_cv_pthread_mutex_adaptive_np+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); + pthread_mutexattr_destroy(&attr); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_pthread_mutex_adaptive_np=yes +else + je_cv_pthread_mutex_adaptive_np=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5 +$as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; } + +if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h + +fi + +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -D_GNU_SOURCE" >&5 +$as_echo_n "checking whether compiler supports -D_GNU_SOURCE... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-D_GNU_SOURCE + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-D_GNU_SOURCE + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-Werror + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +T_APPEND_V=-herror_on_warning + if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}" +else + CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}" +fi + + +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_added=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_added= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char with gnu source is compilable" >&5 +$as_echo_n "checking whether strerror_r returns char with gnu source is compilable... " >&6; } +if ${je_cv_strerror_r_returns_char_with_gnu_source+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#include +#include + +int +main () +{ + + char *buffer = (char *) malloc(100); + char *error = strerror_r(EINVAL, buffer, 100); + printf("%s\n", error); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_strerror_r_returns_char_with_gnu_source=yes +else + je_cv_strerror_r_returns_char_with_gnu_source=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_strerror_r_returns_char_with_gnu_source" >&5 +$as_echo "$je_cv_strerror_r_returns_char_with_gnu_source" >&6; } + +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then + CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}" +else + CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}" +fi + + +if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then + $as_echo "#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE " >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 +$as_echo_n "checking for stdbool.h that conforms to C99... 
" >&6; } +if ${ac_cv_header_stdbool_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #ifndef bool + "error: bool is not defined" + #endif + #ifndef false + "error: false is not defined" + #endif + #if false + "error: false is not 0" + #endif + #ifndef true + "error: true is not defined" + #endif + #if true != 1 + "error: true is not 1" + #endif + #ifndef __bool_true_false_are_defined + "error: __bool_true_false_are_defined is not defined" + #endif + + struct s { _Bool s: 1; _Bool t; } s; + + char a[true == 1 ? 1 : -1]; + char b[false == 0 ? 1 : -1]; + char c[__bool_true_false_are_defined == 1 ? 1 : -1]; + char d[(bool) 0.5 == true ? 1 : -1]; + /* See body of main program for 'e'. */ + char f[(_Bool) 0.0 == false ? 1 : -1]; + char g[true]; + char h[sizeof (_Bool)]; + char i[sizeof s.t]; + enum { j = false, k = true, l = false * true, m = true * 256 }; + /* The following fails for + HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ + _Bool n[m]; + char o[sizeof n == m * sizeof n[0] ? 1 : -1]; + char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; + /* Catch a bug in an HP-UX C compiler. See + http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html + http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html + */ + _Bool q = true; + _Bool *pq = &q; + +int +main () +{ + + bool e = &s; + *pq |= q; + *pq |= ! q; + /* Refer to every declared value, to avoid compiler optimizations. */ + return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + + !m + !n + !o + !p + !q + !pq); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdbool_h=yes +else + ac_cv_header_stdbool_h=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 +$as_echo "$ac_cv_header_stdbool_h" >&6; } + ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" +if test "x$ac_cv_type__Bool" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE__BOOL 1 +_ACEOF + + +fi + + +if test $ac_cv_header_stdbool_h = yes; then + +$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h + +fi + + + +ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt" + +ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols.awk" + +ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols_jet.awk" + +ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h" + +ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h" + +ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h" + +ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h" + +ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h" + +ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h" + +ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h" + + + + +ac_config_headers="$ac_config_headers $cfghdrs_tup" + + + +ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof" + + + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. 
If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. 
+ as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. 
+ac_log=" +This file was extended by $as_me, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to the package provider." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +config.status +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? 
"missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# + + srcdir="${srcdir}" + objroot="${objroot}" + mangling_map="${mangling_map}" + public_syms="${public_syms}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" + + + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" + + + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + + + srcdir="${srcdir}" + objroot="${objroot}" + install_suffix="${install_suffix}" + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;; + "include/jemalloc/internal/private_symbols.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols.awk" ;; + "include/jemalloc/internal/private_symbols_jet.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols_jet.awk" ;; + "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;; + "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;; + "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;; + "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;; + "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;; + "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;; + "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;; + "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;; + "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;; + "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;; + "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;; + "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;; + "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. 
+# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. 
+if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. 
+ test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? 
"could not create -" "$LINENO" 5 + fi + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "include/jemalloc/internal/public_symbols.txt":C) + f="${objroot}include/jemalloc/internal/public_symbols.txt" + mkdir -p "${objroot}include/jemalloc/internal" + cp /dev/null "${f}" + for nm in `echo ${mangling_map} |tr ',' ' '` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'` + echo "${n}:${m}" >> "${f}" + public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` + done + for sym in ${public_syms} ; do + n="${sym}" + m="${JEMALLOC_PREFIX}${sym}" + echo "${n}:${m}" >> "${f}" + done + ;; + "include/jemalloc/internal/private_symbols.awk":C) + f="${objroot}include/jemalloc/internal/private_symbols.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" + ;; + "include/jemalloc/internal/private_symbols_jet.awk":C) + f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" + ;; + "include/jemalloc/internal/public_namespace.h":C) + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" + ;; + "include/jemalloc/internal/public_unnamespace.h":C) + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" + ;; + "include/jemalloc/jemalloc_protos_jet.h":C) + mkdir -p "${objroot}include/jemalloc" + cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" + ;; + "include/jemalloc/jemalloc_rename.h":C) + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" + ;; + "include/jemalloc/jemalloc_mangle.h":C) + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" + ;; + "include/jemalloc/jemalloc_mangle_jet.h":C) + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" + ;; + "include/jemalloc/jemalloc.h":C) + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test 
$ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 +$as_echo "===============================================================================" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version : ${jemalloc_version}" >&5 +$as_echo "jemalloc version : ${jemalloc_version}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision : ${rev}" >&5 +$as_echo "library revision : ${rev}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5 +$as_echo "CONFIG : ${CONFIG}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 +$as_echo "CC : ${CC}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&5 +$as_echo "CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&5 +$as_echo "SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5 +$as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 +$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CXX : ${CXX}" >&5 +$as_echo "CXX : ${CXX}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&5 +$as_echo "CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&5 +$as_echo "SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&5 +$as_echo "EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 +$as_echo "LDFLAGS : ${LDFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5 +$as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: 
DSO_LDFLAGS : ${DSO_LDFLAGS}" >&5 +$as_echo "DSO_LDFLAGS : ${DSO_LDFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 +$as_echo "LIBS : ${LIBS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5 +$as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 +$as_echo "XSLTPROC : ${XSLTPROC}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 +$as_echo "XSLROOT : ${XSLROOT}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX : ${PREFIX}" >&5 +$as_echo "PREFIX : ${PREFIX}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5 +$as_echo "BINDIR : ${BINDIR}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5 +$as_echo "DATADIR : ${DATADIR}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5 +$as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5 +$as_echo "LIBDIR : ${LIBDIR}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5 +$as_echo "MANDIR : ${MANDIR}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot : ${srcroot}" >&5 +$as_echo "srcroot : ${srcroot}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot : ${abs_srcroot}" >&5 +$as_echo "abs_srcroot : ${abs_srcroot}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot : ${objroot}" >&5 +$as_echo "objroot : ${objroot}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot : ${abs_objroot}" >&5 +$as_echo "abs_objroot : ${abs_objroot}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 +$as_echo "" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&5 +$as_echo "JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5 +$as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5 +$as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5 +$as_echo "install_suffix : ${install_suffix}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf : ${config_malloc_conf}" >&5 +$as_echo "malloc_conf : ${config_malloc_conf}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: documentation : ${enable_doc}" >&5 +$as_echo "documentation : ${enable_doc}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: shared libs : ${enable_shared}" >&5 +$as_echo "shared libs : ${enable_shared}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: static libs : ${enable_static}" >&5 +$as_echo "static libs : ${enable_static}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5 +$as_echo "autogen : ${enable_autogen}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5 +$as_echo "debug : ${enable_debug}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5 +$as_echo "stats : ${enable_stats}" >&6; } +{ $as_echo 
"$as_me:${as_lineno-$LINENO}: result: experimetal_smallocx : ${enable_experimental_smallocx}" >&5 +$as_echo "experimetal_smallocx : ${enable_experimental_smallocx}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5 +$as_echo "prof : ${enable_prof}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind : ${enable_prof_libunwind}" >&5 +$as_echo "prof-libunwind : ${enable_prof_libunwind}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc : ${enable_prof_libgcc}" >&5 +$as_echo "prof-libgcc : ${enable_prof_libgcc}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc : ${enable_prof_gcc}" >&5 +$as_echo "prof-gcc : ${enable_prof_gcc}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: fill : ${enable_fill}" >&5 +$as_echo "fill : ${enable_fill}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace : ${enable_utrace}" >&5 +$as_echo "utrace : ${enable_utrace}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5 +$as_echo "xmalloc : ${enable_xmalloc}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: log : ${enable_log}" >&5 +$as_echo "log : ${enable_log}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5 +$as_echo "lazy_lock : ${enable_lazy_lock}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5 +$as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cxx : ${enable_cxx}" >&5 +$as_echo "cxx : ${enable_cxx}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5 +$as_echo "===============================================================================" >&6; } diff --git a/deps/jemalloc/configure.ac b/deps/jemalloc/configure.ac new file mode 100644 index 0000000..f867172 --- /dev/null +++ b/deps/jemalloc/configure.ac @@ -0,0 +1,2406 @@ +dnl Process this file with autoconf to produce a configure script. +AC_PREREQ(2.68) +AC_INIT([Makefile.in]) + +AC_CONFIG_AUX_DIR([build-aux]) + +dnl ============================================================================ +dnl Custom macro definitions. + +dnl JE_CONCAT_VVV(r, a, b) +dnl +dnl Set $r to the concatenation of $a and $b, with a space separating them iff +dnl both $a and $b are non-empty. +AC_DEFUN([JE_CONCAT_VVV], +if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then + $1="[$]{$2}[$]{$3}" +else + $1="[$]{$2} [$]{$3}" +fi +) + +dnl JE_APPEND_VS(a, b) +dnl +dnl Set $a to the concatenation of $a and b, with a space separating them iff +dnl both $a and b are non-empty. +AC_DEFUN([JE_APPEND_VS], + T_APPEND_V=$2 + JE_CONCAT_VVV($1, $1, T_APPEND_V) +) + +CONFIGURE_CFLAGS= +SPECIFIED_CFLAGS="${CFLAGS}" +dnl JE_CFLAGS_ADD(cflag) +dnl +dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS +dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests. This macro +dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS. 
+AC_DEFUN([JE_CFLAGS_ADD], +[ +AC_MSG_CHECKING([whether compiler supports $1]) +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +JE_APPEND_VS(CONFIGURE_CFLAGS, $1) +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ +]], [[ + return 0; +]])], + [je_cv_cflags_added=$1] + AC_MSG_RESULT([yes]), + [je_cv_cflags_added=] + AC_MSG_RESULT([no]) + [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"] +) +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) +]) + +dnl JE_CFLAGS_SAVE() +dnl JE_CFLAGS_RESTORE() +dnl +dnl Save/restore CFLAGS. Nesting is not supported. +AC_DEFUN([JE_CFLAGS_SAVE], +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +) +AC_DEFUN([JE_CFLAGS_RESTORE], +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) +) + +CONFIGURE_CXXFLAGS= +SPECIFIED_CXXFLAGS="${CXXFLAGS}" +dnl JE_CXXFLAGS_ADD(cxxflag) +AC_DEFUN([JE_CXXFLAGS_ADD], +[ +AC_MSG_CHECKING([whether compiler supports $1]) +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1) +JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) +AC_LANG_PUSH([C++]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ +]], [[ + return 0; +]])], + [je_cv_cxxflags_added=$1] + AC_MSG_RESULT([yes]), + [je_cv_cxxflags_added=] + AC_MSG_RESULT([no]) + [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"] +) +AC_LANG_POP([C++]) +JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) +]) + +dnl JE_COMPILABLE(label, hcode, mcode, rvar) +dnl +dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors +dnl cause failure. +AC_DEFUN([JE_COMPILABLE], +[ +AC_CACHE_CHECK([whether $1 is compilable], + [$4], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], + [$3])], + [$4=yes], + [$4=no])]) +]) + +dnl ============================================================================ + +CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` +AC_SUBST([CONFIG]) + +dnl Library revision. +rev=2 +AC_SUBST([rev]) + +srcroot=$srcdir +if test "x${srcroot}" = "x." ; then + srcroot="" +else + srcroot="${srcroot}/" +fi +AC_SUBST([srcroot]) +abs_srcroot="`cd \"${srcdir}\"; pwd`/" +AC_SUBST([abs_srcroot]) + +objroot="" +AC_SUBST([objroot]) +abs_objroot="`pwd`/" +AC_SUBST([abs_objroot]) + +dnl Munge install path variables. +if test "x$prefix" = "xNONE" ; then + prefix="/usr/local" +fi +if test "x$exec_prefix" = "xNONE" ; then + exec_prefix=$prefix +fi +PREFIX=$prefix +AC_SUBST([PREFIX]) +BINDIR=`eval echo $bindir` +BINDIR=`eval echo $BINDIR` +AC_SUBST([BINDIR]) +INCLUDEDIR=`eval echo $includedir` +INCLUDEDIR=`eval echo $INCLUDEDIR` +AC_SUBST([INCLUDEDIR]) +LIBDIR=`eval echo $libdir` +LIBDIR=`eval echo $LIBDIR` +AC_SUBST([LIBDIR]) +DATADIR=`eval echo $datadir` +DATADIR=`eval echo $DATADIR` +AC_SUBST([DATADIR]) +MANDIR=`eval echo $mandir` +MANDIR=`eval echo $MANDIR` +AC_SUBST([MANDIR]) + +dnl Support for building documentation. +AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) +if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then + DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" +elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then + DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" +else + dnl Documentation building will fail if this default gets used. 
+ DEFAULT_XSLROOT="" +fi +AC_ARG_WITH([xslroot], + [AS_HELP_STRING([--with-xslroot=], [XSL stylesheet root path])], [ +if test "x$with_xslroot" = "xno" ; then + XSLROOT="${DEFAULT_XSLROOT}" +else + XSLROOT="${with_xslroot}" +fi +], + XSLROOT="${DEFAULT_XSLROOT}" +) +if test "x$XSLTPROC" = "xfalse" ; then + XSLROOT="" +fi +AC_SUBST([XSLROOT]) + +dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, +dnl just prevent autoconf from molesting CFLAGS. +CFLAGS=$CFLAGS +AC_PROG_CC + +if test "x$GCC" != "xyes" ; then + AC_CACHE_CHECK([whether compiler is MSVC], + [je_cv_msvc], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#ifndef _MSC_VER + int fail[-1]; +#endif +])], + [je_cv_msvc=yes], + [je_cv_msvc=no])]) +fi + +dnl check if a cray prgenv wrapper compiler is being used +je_cv_cray_prgenv_wrapper="" +if test "x${PE_ENV}" != "x" ; then + case "${CC}" in + CC|cc) + je_cv_cray_prgenv_wrapper="yes" + ;; + *) + ;; + esac +fi + +AC_CACHE_CHECK([whether compiler is cray], + [je_cv_cray], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#ifndef _CRAYC + int fail[-1]; +#endif +])], + [je_cv_cray=yes], + [je_cv_cray=no])]) + +if test "x${je_cv_cray}" = "xyes" ; then + AC_CACHE_CHECK([whether cray compiler version is 8.4], + [je_cv_cray_84], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) + int fail[-1]; +#endif +])], + [je_cv_cray_84=yes], + [je_cv_cray_84=no])]) +fi + +if test "x$GCC" = "xyes" ; then + JE_CFLAGS_ADD([-std=gnu11]) + if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + else + JE_CFLAGS_ADD([-std=gnu99]) + if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + fi + fi + JE_CFLAGS_ADD([-Wall]) + JE_CFLAGS_ADD([-Wextra]) + JE_CFLAGS_ADD([-Wshorten-64-to-32]) + JE_CFLAGS_ADD([-Wsign-compare]) + JE_CFLAGS_ADD([-Wundef]) + JE_CFLAGS_ADD([-Wno-format-zero-length]) + JE_CFLAGS_ADD([-pipe]) + JE_CFLAGS_ADD([-g3]) +elif test "x$je_cv_msvc" = "xyes" ; then + CC="$CC -nologo" + JE_CFLAGS_ADD([-Zi]) + JE_CFLAGS_ADD([-MT]) + JE_CFLAGS_ADD([-W3]) + JE_CFLAGS_ADD([-FS]) + JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat) +fi +if test "x$je_cv_cray" = "xyes" ; then + dnl cray compiler 8.4 has an inlining bug + if test "x$je_cv_cray_84" = "xyes" ; then + JE_CFLAGS_ADD([-hipa2]) + JE_CFLAGS_ADD([-hnognu]) + fi + dnl ignore unreachable code warning + JE_CFLAGS_ADD([-hnomessage=128]) + dnl ignore redefinition of "malloc", "free", etc warning + JE_CFLAGS_ADD([-hnomessage=1357]) +fi +AC_SUBST([CONFIGURE_CFLAGS]) +AC_SUBST([SPECIFIED_CFLAGS]) +AC_SUBST([EXTRA_CFLAGS]) +AC_PROG_CPP + +AC_ARG_ENABLE([cxx], + [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])], +if test "x$enable_cxx" = "xno" ; then + enable_cxx="0" +else + enable_cxx="1" +fi +, +enable_cxx="1" +) +if test "x$enable_cxx" = "x1" ; then + dnl Require at least c++14, which is the first version to support sized + dnl deallocation. C++ support is not compiled otherwise. 
+ m4_include([m4/ax_cxx_compile_stdcxx.m4]) + AX_CXX_COMPILE_STDCXX([14], [noext], [optional]) + if test "x${HAVE_CXX14}" = "x1" ; then + JE_CXXFLAGS_ADD([-Wall]) + JE_CXXFLAGS_ADD([-Wextra]) + JE_CXXFLAGS_ADD([-g3]) + + SAVED_LIBS="${LIBS}" + JE_APPEND_VS(LIBS, -lstdc++) + JE_COMPILABLE([libstdc++ linkage], [ +#include +], [[ + int *arr = (int *)malloc(sizeof(int) * 42); + if (arr == NULL) + return 1; +]], [je_cv_libstdcxx]) + if test "x${je_cv_libstdcxx}" = "xno" ; then + LIBS="${SAVED_LIBS}" + fi + else + enable_cxx="0" + fi +fi +AC_SUBST([enable_cxx]) +AC_SUBST([CONFIGURE_CXXFLAGS]) +AC_SUBST([SPECIFIED_CXXFLAGS]) +AC_SUBST([EXTRA_CXXFLAGS]) + +AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) +if test "x${ac_cv_big_endian}" = "x1" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) +fi + +if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then + JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99) +fi + +if test "x${je_cv_msvc}" = "xyes" ; then + LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN + AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit]) +else + AC_CHECK_SIZEOF([void *]) + if test "x${ac_cv_sizeof_void_p}" = "x8" ; then + LG_SIZEOF_PTR=3 + elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then + LG_SIZEOF_PTR=2 + else + AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) + fi +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) + +AC_CHECK_SIZEOF([int]) +if test "x${ac_cv_sizeof_int}" = "x8" ; then + LG_SIZEOF_INT=3 +elif test "x${ac_cv_sizeof_int}" = "x4" ; then + LG_SIZEOF_INT=2 +else + AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) + +AC_CHECK_SIZEOF([long]) +if test "x${ac_cv_sizeof_long}" = "x8" ; then + LG_SIZEOF_LONG=3 +elif test "x${ac_cv_sizeof_long}" = "x4" ; then + LG_SIZEOF_LONG=2 +else + AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) + +AC_CHECK_SIZEOF([long long]) +if test "x${ac_cv_sizeof_long_long}" = "x8" ; then + LG_SIZEOF_LONG_LONG=3 +elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then + LG_SIZEOF_LONG_LONG=2 +else + AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG]) + +AC_CHECK_SIZEOF([intmax_t]) +if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then + LG_SIZEOF_INTMAX_T=4 +elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then + LG_SIZEOF_INTMAX_T=3 +elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then + LG_SIZEOF_INTMAX_T=2 +else + AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) + +AC_CANONICAL_HOST +dnl CPU-specific settings. 
+CPU_SPINWAIT="" +case "${host_cpu}" in + i686|x86_64) + HAVE_CPU_SPINWAIT=1 + if test "x${je_cv_msvc}" = "xyes" ; then + AC_CACHE_VAL([je_cv_pause_msvc], + [JE_COMPILABLE([pause instruction MSVC], [], + [[_mm_pause(); return 0;]], + [je_cv_pause_msvc])]) + if test "x${je_cv_pause_msvc}" = "xyes" ; then + CPU_SPINWAIT='_mm_pause()' + fi + else + AC_CACHE_VAL([je_cv_pause], + [JE_COMPILABLE([pause instruction], [], + [[__asm__ volatile("pause"); return 0;]], + [je_cv_pause])]) + if test "x${je_cv_pause}" = "xyes" ; then + CPU_SPINWAIT='__asm__ volatile("pause")' + fi + fi + ;; + *) + HAVE_CPU_SPINWAIT=0 + ;; +esac +AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT]) +AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) + +AC_ARG_WITH([lg_vaddr], + [AS_HELP_STRING([--with-lg-vaddr=], [Number of significant virtual address bits])], + [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"]) + +case "${host_cpu}" in + aarch64) + if test "x$LG_VADDR" = "xdetect"; then + AC_MSG_CHECKING([number of significant virtual address bits]) + if test "x${LG_SIZEOF_PTR}" = "x2" ; then + #aarch64 ILP32 + LG_VADDR=32 + else + #aarch64 LP64 + LG_VADDR=48 + fi + AC_MSG_RESULT([$LG_VADDR]) + fi + ;; + x86_64) + if test "x$LG_VADDR" = "xdetect"; then + AC_CACHE_CHECK([number of significant virtual address bits], + [je_cv_lg_vaddr], + AC_RUN_IFELSE([AC_LANG_PROGRAM( +[[ +#include +#ifdef _WIN32 +#include +#include +typedef unsigned __int32 uint32_t; +#else +#include +#endif +]], [[ + uint32_t r[[4]]; + uint32_t eax_in = 0x80000008U; +#ifdef _WIN32 + __cpuid((int *)r, (int)eax_in); +#else + asm volatile ("cpuid" + : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]]) + : "a" (eax_in), "c" (0) + ); +#endif + uint32_t eax_out = r[[0]]; + uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); + FILE *f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + if (vaddr > (sizeof(void *) << 3)) { + vaddr = sizeof(void *) << 3; + } + fprintf(f, "%u", vaddr); + fclose(f); + return 0; +]])], + [je_cv_lg_vaddr=`cat conftest.out`], + [je_cv_lg_vaddr=error], + [je_cv_lg_vaddr=57])) + if test "x${je_cv_lg_vaddr}" != "x" ; then + LG_VADDR="${je_cv_lg_vaddr}" + fi + if test "x${LG_VADDR}" != "xerror" ; then + AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + else + AC_MSG_ERROR([cannot determine number of significant virtual address bits]) + fi + fi + ;; + *) + if test "x$LG_VADDR" = "xdetect"; then + AC_MSG_CHECKING([number of significant virtual address bits]) + if test "x${LG_SIZEOF_PTR}" = "x3" ; then + LG_VADDR=64 + elif test "x${LG_SIZEOF_PTR}" = "x2" ; then + LG_VADDR=32 + elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then + LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" + else + AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}]) + fi + AC_MSG_RESULT([$LG_VADDR]) + fi + ;; +esac +AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + +LD_PRELOAD_VAR="LD_PRELOAD" +so="so" +importlib="${so}" +o="$ac_objext" +a="a" +exe="$ac_exeext" +libprefix="lib" +link_whole_archive="0" +DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' +RPATH='-Wl,-rpath,$(1)' +SOREV="${so}.${rev}" +PIC_CFLAGS='-fPIC -DPIC' +CTARGET='-o $@' +LDTARGET='-o $@' +TEST_LD_MODE= +EXTRA_LDFLAGS= +ARFLAGS='crs' +AROUT=' $@' +CC_MM=1 + +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + TEST_LD_MODE='-dynamic' +fi + +if test "x${je_cv_cray}" = "xyes" ; then + CC_MM= +fi + +AN_MAKEVAR([AR], [AC_PROG_AR]) +AN_PROGRAM([ar], [AC_PROG_AR]) +AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) +AC_PROG_AR + +AN_MAKEVAR([NM], [AC_PROG_NM]) +AN_PROGRAM([nm], 
[AC_PROG_NM]) +AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)]) +AC_PROG_NM + +AC_PROG_AWK + +dnl ============================================================================ +dnl jemalloc version. +dnl + +AC_ARG_WITH([version], + [AS_HELP_STRING([--with-version=..--g], + [Version string])], + [ + echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null + if test $? -eq 0 ; then + echo "$with_version" > "${objroot}VERSION" + else + echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null + if test $? -ne 0 ; then + AC_MSG_ERROR([${with_version} does not match ..--g or VERSION]) + fi + fi + ], [ + dnl Set VERSION if source directory is inside a git repository. + if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + dnl Pattern globs aren't powerful enough to match both single- and + dnl double-digit version numbers, so iterate over patterns to support up + dnl to version 99.99.99 without any accidental matches. + for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + done + fi + rm -f "${objroot}VERSION.tmp" + ]) + +if test ! -e "${objroot}VERSION" ; then + if test ! -e "${srcroot}VERSION" ; then + AC_MSG_RESULT( + [Missing VERSION file, and unable to generate it; creating bogus VERSION]) + echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" + else + cp ${srcroot}VERSION ${objroot}VERSION + fi +fi +jemalloc_version=`cat "${objroot}VERSION"` +jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` +jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` +jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` +jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` +jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` +AC_SUBST([jemalloc_version]) +AC_SUBST([jemalloc_version_major]) +AC_SUBST([jemalloc_version_minor]) +AC_SUBST([jemalloc_version_bugfix]) +AC_SUBST([jemalloc_version_nrev]) +AC_SUBST([jemalloc_version_gid]) + +dnl Platform-specific settings. abi and RPATH can probably be determined +dnl programmatically, but doing so is error-prone, which makes it generally +dnl not worth the trouble. +dnl +dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the +dnl definitions need to be seen before any headers are included, which is a pain +dnl to make happen otherwise. 
+default_retain="0" +maps_coalesce="1" +DUMP_SYMS="${NM} -a" +SYM_PREFIX="" +case "${host}" in + *-*-darwin* | *-*-ios*) + abi="macho" + RPATH="" + LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" + so="dylib" + importlib="${so}" + force_tls="0" + DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' + SOREV="${rev}.${so}" + sbrk_deprecated="1" + SYM_PREFIX="_" + ;; + *-*-freebsd*) + abi="elf" + AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) + force_lazy_lock="1" + ;; + *-*-dragonfly*) + abi="elf" + ;; + *-*-openbsd*) + abi="elf" + force_tls="0" + ;; + *-*-bitrig*) + abi="elf" + ;; + *-*-linux-android) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_C11_ATOMICS]) + force_tls="0" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *-*-linux*) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *-*-kfreebsd*) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) + abi="elf" + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) + ;; + *-*-netbsd*) + AC_MSG_CHECKING([ABI]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[#ifdef __ELF__ +/* ELF */ +#else +#error aout +#endif +]])], + [abi="elf"], + [abi="aout"]) + AC_MSG_RESULT([$abi]) + ;; + *-*-solaris2*) + abi="elf" + RPATH='-Wl,-R,$(1)' + dnl Solaris needs this for sigwait(). 
+ JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS) + JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl) + ;; + *-ibm-aix*) + if test "${LG_SIZEOF_PTR}" = "3"; then + dnl 64bit AIX + LD_PRELOAD_VAR="LDR_PRELOAD64" + else + dnl 32bit AIX + LD_PRELOAD_VAR="LDR_PRELOAD" + fi + abi="xcoff" + ;; + *-*-mingw* | *-*-cygwin*) + abi="pecoff" + force_tls="0" + maps_coalesce="0" + RPATH="" + so="dll" + if test "x$je_cv_msvc" = "xyes" ; then + importlib="lib" + DSO_LDFLAGS="-LD" + EXTRA_LDFLAGS="-link -DEBUG" + CTARGET='-Fo$@' + LDTARGET='-Fe$@' + AR='lib' + ARFLAGS='-nologo -out:' + AROUT='$@' + CC_MM= + else + importlib="${so}" + DSO_LDFLAGS="-shared" + link_whole_archive="1" + fi + case "${host}" in + *-*-cygwin*) + DUMP_SYMS="dumpbin /SYMBOLS" + ;; + *) + ;; + esac + a="lib" + libprefix="" + SOREV="${so}" + PIC_CFLAGS="" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *) + AC_MSG_RESULT([Unsupported operating system: ${host}]) + abi="elf" + ;; +esac + +JEMALLOC_USABLE_SIZE_CONST=const +AC_CHECK_HEADERS([malloc.h], [ + AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM( + [#include + #include + size_t malloc_usable_size(const void *ptr); + ], + [])],[ + AC_MSG_RESULT([yes]) + ],[ + JEMALLOC_USABLE_SIZE_CONST= + AC_MSG_RESULT([no]) + ]) +]) +AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) +AC_SUBST([abi]) +AC_SUBST([RPATH]) +AC_SUBST([LD_PRELOAD_VAR]) +AC_SUBST([so]) +AC_SUBST([importlib]) +AC_SUBST([o]) +AC_SUBST([a]) +AC_SUBST([exe]) +AC_SUBST([libprefix]) +AC_SUBST([link_whole_archive]) +AC_SUBST([DSO_LDFLAGS]) +AC_SUBST([EXTRA_LDFLAGS]) +AC_SUBST([SOREV]) +AC_SUBST([PIC_CFLAGS]) +AC_SUBST([CTARGET]) +AC_SUBST([LDTARGET]) +AC_SUBST([TEST_LD_MODE]) +AC_SUBST([MKLIB]) +AC_SUBST([ARFLAGS]) +AC_SUBST([AROUT]) +AC_SUBST([DUMP_SYMS]) +AC_SUBST([CC_MM]) + +dnl Determine whether libm must be linked to use e.g. log(3). +AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])]) +if test "x$ac_cv_search_log" != "xnone required" ; then + LM="$ac_cv_search_log" +else + LM= +fi +AC_SUBST(LM) + +JE_COMPILABLE([__attribute__ syntax], + [static __attribute__((unused)) void foo(void){}], + [], + [je_cv_attribute]) +if test "x${je_cv_attribute}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) + if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then + JE_CFLAGS_ADD([-fvisibility=hidden]) + JE_CXXFLAGS_ADD([-fvisibility=hidden]) + fi +fi +dnl Check for tls_model attribute support (clang 3.0 still lacks support). +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([tls_model attribute], [], + [static __thread int + __attribute__((tls_model("initial-exec"), unused)) foo; + foo = 0;], + [je_cv_tls_model]) +JE_CFLAGS_RESTORE() +dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for +dnl --disable-initial-exec-tls) + +dnl Check for alloc_size attribute support. +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([alloc_size attribute], [#include ], + [void *foo(size_t size) __attribute__((alloc_size(1)));], + [je_cv_alloc_size]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_alloc_size}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) +fi +dnl Check for format(gnu_printf, ...) attribute support. +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([format(gnu_printf, ...) 
attribute], [#include <stdio.h>], + [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], + [je_cv_format_gnu_printf]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_format_gnu_printf}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) +fi +dnl Check for format(printf, ...) attribute support. +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([format(printf, ...) attribute], [#include <stdio.h>], + [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));], + [je_cv_format_printf]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_format_printf}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) +fi + +dnl Check for format_arg(...) attribute support. +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([format_arg(...) attribute], [#include <stdio.h>], + [const char * __attribute__((__format_arg__(1))) foo(const char *format);], + [je_cv_format_arg]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_format_arg}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_ARG], [ ]) +fi + +dnl Support optional additions to rpath. +AC_ARG_WITH([rpath], + [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])], +if test "x$with_rpath" = "xno" ; then + RPATH_EXTRA= +else + RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" +fi, + RPATH_EXTRA= +) +AC_SUBST([RPATH_EXTRA]) + +dnl Disable rules that do automatic regeneration of configure output by default. +AC_ARG_ENABLE([autogen], + [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], +if test "x$enable_autogen" = "xno" ; then + enable_autogen="0" +else + enable_autogen="1" +fi +, +enable_autogen="0" +) +AC_SUBST([enable_autogen]) + +AC_PROG_INSTALL +AC_PROG_RANLIB +AC_PATH_PROG([LD], [ld], [false], [$PATH]) +AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) + +dnl Enable documentation +AC_ARG_ENABLE([doc], + [AS_HELP_STRING([--enable-documentation], [Build documentation])], +if test "x$enable_doc" = "xno" ; then + enable_doc="0" +else + enable_doc="1" +fi +, +enable_doc="1" +) +AC_SUBST([enable_doc]) + +dnl Enable shared libs +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared], [Build shared libraries])], +if test "x$enable_shared" = "xno" ; then + enable_shared="0" +else + enable_shared="1" +fi +, +enable_shared="1" +) +AC_SUBST([enable_shared]) + +dnl Enable static libs +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static], [Build static libraries])], +if test "x$enable_static" = "xno" ; then + enable_static="0" +else + enable_static="1" +fi +, +enable_static="1" +) +AC_SUBST([enable_static]) + +if test "$enable_shared$enable_static" = "00" ; then + AC_MSG_ERROR([Please enable one of shared or static builds]) +fi + +dnl Perform no name mangling by default. +AC_ARG_WITH([mangling], + [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])], + [mangling_map="$with_mangling"], [mangling_map=""]) + +dnl Do not prefix public APIs by default.
+AC_ARG_WITH([jemalloc_prefix], + [AS_HELP_STRING([--with-jemalloc-prefix=], [Prefix to prepend to all public APIs])], + [JEMALLOC_PREFIX="$with_jemalloc_prefix"], + [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then + JEMALLOC_PREFIX="" +else + JEMALLOC_PREFIX="je_" +fi] +) +if test "x$JEMALLOC_PREFIX" = "x" ; then + AC_DEFINE([JEMALLOC_IS_MALLOC]) +else + JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` + AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) + AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) +fi +AC_SUBST([JEMALLOC_PREFIX]) +AC_SUBST([JEMALLOC_CPREFIX]) + +AC_ARG_WITH([export], + [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], + [if test "x$with_export" = "xno"; then + AC_DEFINE([JEMALLOC_EXPORT],[]) +fi] +) + +public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" +dnl Check for additional platform-specific public API functions. +AC_CHECK_FUNC([memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) + public_syms="${public_syms} memalign"]) +AC_CHECK_FUNC([valloc], + [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) + public_syms="${public_syms} valloc"]) + +dnl Check for allocator-related functions that should be wrapped. +wrap_syms= +if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_CHECK_FUNC([__libc_calloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_calloc"]) + AC_CHECK_FUNC([__libc_free], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ]) + wrap_syms="${wrap_syms} __libc_free"]) + AC_CHECK_FUNC([__libc_malloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_malloc"]) + AC_CHECK_FUNC([__libc_memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ]) + wrap_syms="${wrap_syms} __libc_memalign"]) + AC_CHECK_FUNC([__libc_realloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_realloc"]) + AC_CHECK_FUNC([__libc_valloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_valloc"]) + AC_CHECK_FUNC([__posix_memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ]) + wrap_syms="${wrap_syms} __posix_memalign"]) +fi + +case "${host}" in + *-*-mingw* | *-*-cygwin*) + wrap_syms="${wrap_syms} tls_callback" + ;; + *) + ;; +esac + +dnl Mangle library-private APIs. +AC_ARG_WITH([private_namespace], + [AS_HELP_STRING([--with-private-namespace=], [Prefix to prepend to all library-private APIs])], + [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], + [JEMALLOC_PRIVATE_NAMESPACE="je_"] +) +AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) +private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" +AC_SUBST([private_namespace]) + +dnl Do not add suffix to installed files by default. +AC_ARG_WITH([install_suffix], + [AS_HELP_STRING([--with-install-suffix=], [Suffix to append to all installed files])], + [INSTALL_SUFFIX="$with_install_suffix"], + [INSTALL_SUFFIX=] +) +install_suffix="$INSTALL_SUFFIX" +AC_SUBST([install_suffix]) + +dnl Specify default malloc_conf. 
+AC_ARG_WITH([malloc_conf], + [AS_HELP_STRING([--with-malloc-conf=], [config.malloc_conf options string])], + [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"], + [JEMALLOC_CONFIG_MALLOC_CONF=""] +) +config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" +AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"]) + +dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of +dnl jemalloc_protos_jet.h easy. +je_="je_" +AC_SUBST([je_]) + +cfgoutputs_in="Makefile.in" +cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" +cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" +cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" +cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" + +cfgoutputs_out="Makefile" +cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" +cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" +cfgoutputs_out="${cfgoutputs_out} test/test.sh" +cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" + +cfgoutputs_tup="Makefile" +cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" +cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" +cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" + +cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" +cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" + +cfghdrs_out="include/jemalloc/jemalloc_defs.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" 
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" +cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" + +cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" + +dnl ============================================================================ +dnl jemalloc build options. +dnl + +dnl Do not compile with debugging by default. +AC_ARG_ENABLE([debug], + [AS_HELP_STRING([--enable-debug], + [Build debugging code])], +[if test "x$enable_debug" = "xno" ; then + enable_debug="0" +else + enable_debug="1" +fi +], +[enable_debug="0"] +) +if test "x$enable_debug" = "x1" ; then + AC_DEFINE([JEMALLOC_DEBUG], [ ]) +fi +if test "x$enable_debug" = "x1" ; then + AC_DEFINE([JEMALLOC_DEBUG], [ ]) +fi +AC_SUBST([enable_debug]) + +dnl Only optimize if not debugging. +if test "x$enable_debug" = "x0" ; then + if test "x$GCC" = "xyes" ; then + JE_CFLAGS_ADD([-O3]) + JE_CXXFLAGS_ADD([-O3]) + JE_CFLAGS_ADD([-funroll-loops]) + elif test "x$je_cv_msvc" = "xyes" ; then + JE_CFLAGS_ADD([-O2]) + JE_CXXFLAGS_ADD([-O2]) + else + JE_CFLAGS_ADD([-O]) + JE_CXXFLAGS_ADD([-O]) + fi +fi + +dnl Enable statistics calculation by default. +AC_ARG_ENABLE([stats], + [AS_HELP_STRING([--disable-stats], + [Disable statistics calculation/reporting])], +[if test "x$enable_stats" = "xno" ; then + enable_stats="0" +else + enable_stats="1" +fi +], +[enable_stats="1"] +) +if test "x$enable_stats" = "x1" ; then + AC_DEFINE([JEMALLOC_STATS], [ ]) +fi +AC_SUBST([enable_stats]) + +dnl Do not enable smallocx by default. +AC_ARG_ENABLE([experimental_smallocx], + [AS_HELP_STRING([--enable-experimental-smallocx], [Enable experimental smallocx API])], +[if test "x$enable_experimental_smallocx" = "xno" ; then +enable_experimental_smallocx="0" +else +enable_experimental_smallocx="1" +fi +], +[enable_experimental_smallocx="0"] +) +if test "x$enable_experimental_smallocx" = "x1" ; then + AC_DEFINE([JEMALLOC_EXPERIMENTAL_SMALLOCX_API]) +fi +AC_SUBST([enable_experimental_smallocx]) + +dnl Do not enable profiling by default. 
+AC_ARG_ENABLE([prof], + [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], +[if test "x$enable_prof" = "xno" ; then + enable_prof="0" +else + enable_prof="1" +fi +], +[enable_prof="0"] +) +if test "x$enable_prof" = "x1" ; then + backtrace_method="" +else + backtrace_method="N/A" +fi + +AC_ARG_ENABLE([prof-libunwind], + [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], +[if test "x$enable_prof_libunwind" = "xno" ; then + enable_prof_libunwind="0" +else + enable_prof_libunwind="1" +fi +], +[enable_prof_libunwind="0"] +) +AC_ARG_WITH([static_libunwind], + [AS_HELP_STRING([--with-static-libunwind=], + [Path to static libunwind library; use rather than dynamically linking])], +if test "x$with_static_libunwind" = "xno" ; then + LUNWIND="-lunwind" +else + if test ! -f "$with_static_libunwind" ; then + AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) + fi + LUNWIND="$with_static_libunwind" +fi, + LUNWIND="-lunwind" +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then + AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) + if test "x$LUNWIND" = "x-lunwind" ; then + AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)], + [enable_prof_libunwind="0"]) + else + JE_APPEND_VS(LIBS, $LUNWIND) + fi + if test "x${enable_prof_libunwind}" = "x1" ; then + backtrace_method="libunwind" + AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) + fi +fi + +AC_ARG_ENABLE([prof-libgcc], + [AS_HELP_STRING([--disable-prof-libgcc], + [Do not use libgcc for backtracing])], +[if test "x$enable_prof_libgcc" = "xno" ; then + enable_prof_libgcc="0" +else + enable_prof_libgcc="1" +fi +], +[enable_prof_libgcc="1"] +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) + if test "x${enable_prof_libgcc}" = "x1" ; then + AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"]) + fi + if test "x${enable_prof_libgcc}" = "x1" ; then + backtrace_method="libgcc" + AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) + fi +else + enable_prof_libgcc="0" +fi + +AC_ARG_ENABLE([prof-gcc], + [AS_HELP_STRING([--disable-prof-gcc], + [Do not use gcc intrinsics for backtracing])], +[if test "x$enable_prof_gcc" = "xno" ; then + enable_prof_gcc="0" +else + enable_prof_gcc="1" +fi +], +[enable_prof_gcc="1"] +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + JE_CFLAGS_ADD([-fno-omit-frame-pointer]) + backtrace_method="gcc intrinsics" + AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) +else + enable_prof_gcc="0" +fi + +if test "x$backtrace_method" = "x" ; then + backtrace_method="none (disabling profiling)" + enable_prof="0" +fi +AC_MSG_CHECKING([configured backtracing method]) +AC_MSG_RESULT([$backtrace_method]) +if test "x$enable_prof" = "x1" ; then + dnl Heap profiling uses the log(3) function. + JE_APPEND_VS(LIBS, $LM) + + AC_DEFINE([JEMALLOC_PROF], [ ]) +fi +AC_SUBST([enable_prof]) + +dnl Indicate whether adjacent virtual memory mappings automatically coalesce +dnl (and fragment on demand). +if test "x${maps_coalesce}" = "x1" ; then + AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) +fi + +dnl Indicate whether to retain memory (rather than using munmap()) by default. +if test "x$default_retain" = "x1" ; then + AC_DEFINE([JEMALLOC_RETAIN], [ ]) +fi + +dnl Enable allocation from DSS if supported by the OS. +have_dss="1" +dnl Check whether the BSD/SUSv1 sbrk() exists. 
If not, disable DSS support. +AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) +if test "x$have_sbrk" = "x1" ; then + if test "x$sbrk_deprecated" = "x1" ; then + AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) + have_dss="0" + fi +else + have_dss="0" +fi + +if test "x$have_dss" = "x1" ; then + AC_DEFINE([JEMALLOC_DSS], [ ]) +fi + +dnl Support the junk/zero filling option by default. +AC_ARG_ENABLE([fill], + [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])], +[if test "x$enable_fill" = "xno" ; then + enable_fill="0" +else + enable_fill="1" +fi +], +[enable_fill="1"] +) +if test "x$enable_fill" = "x1" ; then + AC_DEFINE([JEMALLOC_FILL], [ ]) +fi +AC_SUBST([enable_fill]) + +dnl Disable utrace(2)-based tracing by default. +AC_ARG_ENABLE([utrace], + [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], +[if test "x$enable_utrace" = "xno" ; then + enable_utrace="0" +else + enable_utrace="1" +fi +], +[enable_utrace="0"] +) +JE_COMPILABLE([utrace(2)], [ +#include +#include +#include +#include +#include +], [ + utrace((void *)0, 0); +], [je_cv_utrace]) +if test "x${je_cv_utrace}" = "xno" ; then + enable_utrace="0" +fi +if test "x$enable_utrace" = "x1" ; then + AC_DEFINE([JEMALLOC_UTRACE], [ ]) +fi +AC_SUBST([enable_utrace]) + +dnl Do not support the xmalloc option by default. +AC_ARG_ENABLE([xmalloc], + [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], +[if test "x$enable_xmalloc" = "xno" ; then + enable_xmalloc="0" +else + enable_xmalloc="1" +fi +], +[enable_xmalloc="0"] +) +if test "x$enable_xmalloc" = "x1" ; then + AC_DEFINE([JEMALLOC_XMALLOC], [ ]) +fi +AC_SUBST([enable_xmalloc]) + +dnl Support cache-oblivious allocation alignment by default. +AC_ARG_ENABLE([cache-oblivious], + [AS_HELP_STRING([--disable-cache-oblivious], + [Disable support for cache-oblivious allocation alignment])], +[if test "x$enable_cache_oblivious" = "xno" ; then + enable_cache_oblivious="0" +else + enable_cache_oblivious="1" +fi +], +[enable_cache_oblivious="1"] +) +if test "x$enable_cache_oblivious" = "x1" ; then + AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ]) +fi +AC_SUBST([enable_cache_oblivious]) + +dnl Do not log by default. 
+AC_ARG_ENABLE([log], + [AS_HELP_STRING([--enable-log], [Support debug logging])], +[if test "x$enable_log" = "xno" ; then + enable_log="0" +else + enable_log="1" +fi +], +[enable_log="0"] +) +if test "x$enable_log" = "x1" ; then + AC_DEFINE([JEMALLOC_LOG], [ ]) +fi +AC_SUBST([enable_log]) + +dnl Do not use readlinkat by default +AC_ARG_ENABLE([readlinkat], + [AS_HELP_STRING([--enable-readlinkat], [Use readlinkat over readlink])], +[if test "x$enable_readlinkat" = "xno" ; then + enable_readlinkat="0" +else + enable_readlinkat="1" +fi +], +[enable_readlinkat="0"] +) +if test "x$enable_readlinkat" = "x1" ; then + AC_DEFINE([JEMALLOC_READLINKAT], [ ]) +fi +AC_SUBST([enable_readlinkat]) + +dnl Avoid extra safety checks by default +AC_ARG_ENABLE([opt-safety-checks], + [AS_HELP_STRING([--enable-opt-safety-checks], + [Perform certain low-overhead checks, even in opt mode])], +[if test "x$enable_opt_safety_checks" = "xno" ; then + enable_opt_safety_checks="0" +else + enable_opt_safety_checks="1" +fi +], +[enable_opt_safety_checks="0"] +) +if test "x$enable_opt_safety_checks" = "x1" ; then + AC_DEFINE([JEMALLOC_OPT_SAFETY_CHECKS], [ ]) +fi +AC_SUBST([enable_opt_safety_checks]) + +JE_COMPILABLE([a program using __builtin_unreachable], [ +void foo (void) { + __builtin_unreachable(); +} +], [ + { + foo(); + } +], [je_cv_gcc_builtin_unreachable]) +if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable]) +else + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort]) +fi + +dnl ============================================================================ +dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. +dnl One of those two functions should (theoretically) exist on all platforms +dnl that jemalloc currently has a chance of functioning on without modification. +dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if +dnl ffsl() or __builtin_ffsl() are defined, respectively. 
+JE_COMPILABLE([a program using __builtin_ffsl], [ +#include +#include +#include +], [ + { + int rv = __builtin_ffsl(0x08); + printf("%d\n", rv); + } +], [je_cv_gcc_builtin_ffsl]) +if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll]) + AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl]) + AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs]) +else + JE_COMPILABLE([a program using ffsl], [ + #include + #include + #include + ], [ + { + int rv = ffsl(0x08); + printf("%d\n", rv); + } + ], [je_cv_function_ffsl]) + if test "x${je_cv_function_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll]) + AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl]) + AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs]) + else + AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()]) + fi +fi + +JE_COMPILABLE([a program using __builtin_popcountl], [ +#include +#include +#include +], [ + { + int rv = __builtin_popcountl(0x08); + printf("%d\n", rv); + } +], [je_cv_gcc_builtin_popcountl]) +if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNT], [__builtin_popcount]) + AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTL], [__builtin_popcountl]) +fi + +AC_ARG_WITH([lg_quantum], + [AS_HELP_STRING([--with-lg-quantum=], + [Base 2 log of minimum allocation alignment])], + [LG_QUANTA="$with_lg_quantum"], + [LG_QUANTA="3 4"]) +if test "x$with_lg_quantum" != "x" ; then + AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum]) +fi + +AC_ARG_WITH([lg_page], + [AS_HELP_STRING([--with-lg-page=], [Base 2 log of system page size])], + [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"]) +if test "x$LG_PAGE" = "xdetect"; then + AC_CACHE_CHECK([LG_PAGE], + [je_cv_lg_page], + AC_RUN_IFELSE([AC_LANG_PROGRAM( +[[ +#include +#ifdef _WIN32 +#include +#else +#include +#endif +#include +]], +[[ + int result; + FILE *f; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwPageSize; +#else + result = sysconf(_SC_PAGESIZE); +#endif + if (result == -1) { + return 1; + } + result = JEMALLOC_INTERNAL_FFSL(result) - 1; + + f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + fprintf(f, "%d", result); + fclose(f); + + return 0; +]])], + [je_cv_lg_page=`cat conftest.out`], + [je_cv_lg_page=undefined], + [je_cv_lg_page=12])) +fi +if test "x${je_cv_lg_page}" != "x" ; then + LG_PAGE="${je_cv_lg_page}" +fi +if test "x${LG_PAGE}" != "xundefined" ; then + AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE]) +else + AC_MSG_ERROR([cannot determine value for LG_PAGE]) +fi + +AC_ARG_WITH([lg_hugepage], + [AS_HELP_STRING([--with-lg-hugepage=], + [Base 2 log of system huge page size])], + [je_cv_lg_hugepage="${with_lg_hugepage}"], + [je_cv_lg_hugepage=""]) +if test "x${je_cv_lg_hugepage}" = "x" ; then + dnl Look in /proc/meminfo (Linux-specific) for information on the default huge + dnl page size, if any. The relevant line looks like: + dnl + dnl Hugepagesize: 2048 kB + if test -e "/proc/meminfo" ; then + hpsk=[`cat /proc/meminfo 2>/dev/null | \ + grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ + awk '{print $2}'`] + if test "x${hpsk}" != "x" ; then + je_cv_lg_hugepage=10 + while test "${hpsk}" -gt 1 ; do + hpsk="$((hpsk / 2))" + je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" + done + fi + fi + + dnl Set default if unable to automatically configure. 
+ if test "x${je_cv_lg_hugepage}" = "x" ; then + je_cv_lg_hugepage=21 + fi +fi +if test "x${LG_PAGE}" != "xundefined" -a \ + "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then + AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})]) +fi +AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}]) + +dnl ============================================================================ +dnl Enable libdl by default. +AC_ARG_ENABLE([libdl], + [AS_HELP_STRING([--disable-libdl], + [Do not use libdl])], +[if test "x$enable_libdl" = "xno" ; then + enable_libdl="0" +else + enable_libdl="1" +fi +], +[enable_libdl="1"] +) +AC_SUBST([libdl]) + +dnl ============================================================================ +dnl Configure pthreads. + +if test "x$abi" != "xpecoff" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ]) + AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) + dnl Some systems may embed pthreads functionality in libc; check for libpthread + dnl first, but try libc too before failing. + AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -pthread)], + [AC_SEARCH_LIBS([pthread_create], , , + AC_MSG_ERROR([libpthread is missing]))]) + wrap_syms="${wrap_syms} pthread_create" + have_pthread="1" + +dnl Check if we have dlsym support. + if test "x$enable_libdl" = "x1" ; then + have_dlsym="1" + AC_CHECK_HEADERS([dlfcn.h], + AC_CHECK_FUNC([dlsym], [], + [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]), + [have_dlsym="0"]) + if test "x$have_dlsym" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ]) + fi + else + have_dlsym="0" + fi + + JE_COMPILABLE([pthread_atfork(3)], [ +#include +], [ + pthread_atfork((void *)0, (void *)0, (void *)0); +], [je_cv_pthread_atfork]) + if test "x${je_cv_pthread_atfork}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ]) + fi + dnl Check if pthread_setname_np is available with the expected API. + JE_COMPILABLE([pthread_setname_np(3)], [ +#include +], [ + pthread_setname_np(pthread_self(), "setname_test"); +], [je_cv_pthread_setname_np]) + if test "x${je_cv_pthread_setname_np}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ]) + fi +fi + +JE_APPEND_VS(CPPFLAGS, -D_REENTRANT) + +dnl Check whether clock_gettime(2) is in libc or librt. +AC_SEARCH_LIBS([clock_gettime], [rt]) + +dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with +dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + if test "$ac_cv_search_clock_gettime" != "-lrt"; then + JE_CFLAGS_SAVE() + + unset ac_cv_search_clock_gettime + JE_CFLAGS_ADD([-dynamic]) + AC_SEARCH_LIBS([clock_gettime], [rt]) + + JE_CFLAGS_RESTORE() + fi +fi + +dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific). +JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [ +#include +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); +], [je_cv_clock_monotonic_coarse]) +if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE]) +fi + +dnl check for CLOCK_MONOTONIC. 
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [ +#include +#include +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); +#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 +# error _POSIX_MONOTONIC_CLOCK missing/invalid +#endif +], [je_cv_clock_monotonic]) +if test "x${je_cv_clock_monotonic}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC]) +fi + +dnl Check for mach_absolute_time(). +JE_COMPILABLE([mach_absolute_time()], [ +#include +], [ + mach_absolute_time(); +], [je_cv_mach_absolute_time]) +if test "x${je_cv_mach_absolute_time}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME]) +fi + +dnl Use syscall(2) (if available) by default. +AC_ARG_ENABLE([syscall], + [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])], +[if test "x$enable_syscall" = "xno" ; then + enable_syscall="0" +else + enable_syscall="1" +fi +], +[enable_syscall="1"] +) +if test "x$enable_syscall" = "x1" ; then + dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS + dnl X 10.12's deprecation warning prevents use. + JE_CFLAGS_SAVE() + JE_CFLAGS_ADD([-Werror]) + JE_COMPILABLE([syscall(2)], [ +#include +#include +], [ + syscall(SYS_write, 2, "hello", 5); +], + [je_cv_syscall]) + JE_CFLAGS_RESTORE() + if test "x$je_cv_syscall" = "xyes" ; then + AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ]) + fi +fi + +dnl Check if the GNU-specific secure_getenv function exists. +AC_CHECK_FUNC([secure_getenv], + [have_secure_getenv="1"], + [have_secure_getenv="0"] + ) +if test "x$have_secure_getenv" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) +fi + +dnl Check if the GNU-specific sched_getcpu function exists. +AC_CHECK_FUNC([sched_getcpu], + [have_sched_getcpu="1"], + [have_sched_getcpu="0"] + ) +if test "x$have_sched_getcpu" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ]) +fi + +dnl Check if the GNU-specific sched_setaffinity function exists. +AC_CHECK_FUNC([sched_setaffinity], + [have_sched_setaffinity="1"], + [have_sched_setaffinity="0"] + ) +if test "x$have_sched_setaffinity" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ]) +fi + +dnl Check if the Solaris/BSD issetugid function exists. +AC_CHECK_FUNC([issetugid], + [have_issetugid="1"], + [have_issetugid="0"] + ) +if test "x$have_issetugid" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ]) +fi + +dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use +dnl it rather than pthreads TSD cleanup functions to support cleanup during +dnl thread exit, in order to avoid pthreads library recursion during +dnl bootstrapping. +AC_CHECK_FUNC([_malloc_thread_cleanup], + [have__malloc_thread_cleanup="1"], + [have__malloc_thread_cleanup="0"] + ) +if test "x$have__malloc_thread_cleanup" = "x1" ; then + AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) + wrap_syms="${wrap_syms} _malloc_thread_cleanup" + force_tls="1" +fi + +dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If +dnl so, mutex initialization causes allocation, and we need to implement this +dnl callback function in order to prevent recursive allocation. +AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], + [have__pthread_mutex_init_calloc_cb="1"], + [have__pthread_mutex_init_calloc_cb="0"] + ) +if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then + AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) + wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork" +fi + +dnl Disable lazy locking by default. 
+AC_ARG_ENABLE([lazy_lock], + [AS_HELP_STRING([--enable-lazy-lock], + [Enable lazy locking (only lock when multi-threaded)])], +[if test "x$enable_lazy_lock" = "xno" ; then + enable_lazy_lock="0" +else + enable_lazy_lock="1" +fi +], +[enable_lazy_lock=""] +) +if test "x${enable_lazy_lock}" = "x" ; then + if test "x${force_lazy_lock}" = "x1" ; then + AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) + enable_lazy_lock="1" + else + enable_lazy_lock="0" + fi +fi +if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then + AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented]) + enable_lazy_lock="0" +fi +if test "x$enable_lazy_lock" = "x1" ; then + if test "x$have_dlsym" = "x1" ; then + AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) + else + AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.]) + fi +fi +AC_SUBST([enable_lazy_lock]) + +dnl Automatically configure TLS. +if test "x${force_tls}" = "x1" ; then + enable_tls="1" +elif test "x${force_tls}" = "x0" ; then + enable_tls="0" +else + enable_tls="1" +fi +if test "x${enable_tls}" = "x1" ; then +AC_MSG_CHECKING([for TLS]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ + __thread int x; +]], [[ + x = 42; + + return 0; +]])], + AC_MSG_RESULT([yes]), + AC_MSG_RESULT([no]) + enable_tls="0") +else + enable_tls="0" +fi +AC_SUBST([enable_tls]) +if test "x${enable_tls}" = "x1" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) +fi + +dnl ============================================================================ +dnl Check for C11 atomics. + +JE_COMPILABLE([C11 atomics], [ +#include +#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) +#include +#else +#error Atomics not available +#endif +], [ + uint64_t *p = (uint64_t *)0; + uint64_t x = 1; + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + uint64_t r = atomic_fetch_add(a, x) + x; + return r == 0; +], [je_cv_c11_atomics]) +if test "x${je_cv_c11_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_C11_ATOMICS]) +fi + +dnl ============================================================================ +dnl Check for GCC-style __atomic atomics. + +JE_COMPILABLE([GCC __atomic atomics], [ +], [ + int x = 0; + int val = 1; + int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); + int after_add = x; + return after_add == 1; +], [je_cv_gcc_atomic_atomics]) +if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS]) + + dnl check for 8-bit atomic support + JE_COMPILABLE([GCC 8-bit __atomic atomics], [ + ], [ + unsigned char x = 0; + int val = 1; + int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); + int after_add = (int)x; + return after_add == 1; + ], [je_cv_gcc_u8_atomic_atomics]) + if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_U8_ATOMIC_ATOMICS]) + fi +fi + +dnl ============================================================================ +dnl Check for GCC-style __sync atomics. 
+ +JE_COMPILABLE([GCC __sync atomics], [ +], [ + int x = 0; + int before_add = __sync_fetch_and_add(&x, 1); + int after_add = x; + return (before_add == 0) && (after_add == 1); +], [je_cv_gcc_sync_atomics]) +if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS]) + + dnl check for 8-bit atomic support + JE_COMPILABLE([GCC 8-bit __sync atomics], [ + ], [ + unsigned char x = 0; + int before_add = __sync_fetch_and_add(&x, 1); + int after_add = (int)x; + return (before_add == 0) && (after_add == 1); + ], [je_cv_gcc_u8_sync_atomics]) + if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_U8_SYNC_ATOMICS]) + fi +fi + +dnl ============================================================================ +dnl Check for atomic(3) operations as provided on Darwin. +dnl We need this not for the atomic operations (which are provided above), but +dnl rather for the OS_unfair_lock type it exposes. + +JE_COMPILABLE([Darwin OSAtomic*()], [ +#include +#include +], [ + { + int32_t x32 = 0; + volatile int32_t *x32p = &x32; + OSAtomicAdd32(1, x32p); + } + { + int64_t x64 = 0; + volatile int64_t *x64p = &x64; + OSAtomicAdd64(1, x64p); + } +], [je_cv_osatomic]) +if test "x${je_cv_osatomic}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) +fi + +dnl ============================================================================ +dnl Check for madvise(2). + +JE_COMPILABLE([madvise(2)], [ +#include +], [ + madvise((void *)0, 0, 0); +], [je_cv_madvise]) +if test "x${je_cv_madvise}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) + + dnl Check for madvise(..., MADV_FREE). + JE_COMPILABLE([madvise(..., MADV_FREE)], [ +#include +], [ + madvise((void *)0, 0, MADV_FREE); +], [je_cv_madv_free]) + if test "x${je_cv_madv_free}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + elif test "x${je_cv_madvise}" = "xyes" ; then + case "${host_cpu}" in i686|x86_64) + case "${host}" in *-*-linux*) + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ]) + ;; + esac + ;; + esac + fi + + dnl Check for madvise(..., MADV_DONTNEED). + JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [ +#include +], [ + madvise((void *)0, 0, MADV_DONTNEED); +], [je_cv_madv_dontneed]) + if test "x${je_cv_madv_dontneed}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) + fi + + dnl Check for madvise(..., MADV_DO[NT]DUMP). + JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [ +#include +], [ + madvise((void *)0, 0, MADV_DONTDUMP); + madvise((void *)0, 0, MADV_DODUMP); +], [je_cv_madv_dontdump]) + if test "x${je_cv_madv_dontdump}" = "xyes" ; then + AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ]) + fi + + dnl Check for madvise(..., MADV_[NO]HUGEPAGE). + JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [ +#include +], [ + madvise((void *)0, 0, MADV_HUGEPAGE); + madvise((void *)0, 0, MADV_NOHUGEPAGE); +], [je_cv_thp]) +case "${host_cpu}" in + arm*) + ;; + *) + if test "x${je_cv_thp}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ]) + fi + ;; +esac +fi + +dnl ============================================================================ +dnl Check for __builtin_clz() and __builtin_clzl(). 
+ +AC_CACHE_CHECK([for __builtin_clz], + [je_cv_builtin_clz], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([], + [ + { + unsigned x = 0; + int y = __builtin_clz(x); + } + { + unsigned long x = 0; + int y = __builtin_clzl(x); + } + ])], + [je_cv_builtin_clz=yes], + [je_cv_builtin_clz=no])]) + +if test "x${je_cv_builtin_clz}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) +fi + +dnl ============================================================================ +dnl Check for os_unfair_lock operations as provided on Darwin. + +JE_COMPILABLE([Darwin os_unfair_lock_*()], [ +#include +#include +], [ + #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 + #error "os_unfair_lock is not supported" + #else + os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; + os_unfair_lock_lock(&lock); + os_unfair_lock_unlock(&lock); + #endif +], [je_cv_os_unfair_lock]) +if test "x${je_cv_os_unfair_lock}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) +fi + +dnl ============================================================================ +dnl Darwin-related configuration. + +AC_ARG_ENABLE([zone-allocator], + [AS_HELP_STRING([--disable-zone-allocator], + [Disable zone allocator for Darwin])], +[if test "x$enable_zone_allocator" = "xno" ; then + enable_zone_allocator="0" +else + enable_zone_allocator="1" +fi +], +[if test "x${abi}" = "xmacho"; then + enable_zone_allocator="1" +fi +] +) +AC_SUBST([enable_zone_allocator]) + +if test "x${enable_zone_allocator}" = "x1" ; then + if test "x${abi}" != "xmacho"; then + AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) + fi + AC_DEFINE([JEMALLOC_ZONE], [ ]) +fi + +dnl ============================================================================ +dnl Use initial-exec TLS by default. +AC_ARG_ENABLE([initial-exec-tls], + [AS_HELP_STRING([--disable-initial-exec-tls], + [Disable the initial-exec tls model])], +[if test "x$enable_initial_exec_tls" = "xno" ; then + enable_initial_exec_tls="0" +else + enable_initial_exec_tls="1" +fi +], +[enable_initial_exec_tls="1"] +) +AC_SUBST([enable_initial_exec_tls]) + +if test "x${je_cv_tls_model}" = "xyes" -a \ + "x${enable_initial_exec_tls}" = "x1" ; then + AC_DEFINE([JEMALLOC_TLS_MODEL], + [__attribute__((tls_model("initial-exec")))]) +else + AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) +fi + +dnl ============================================================================ +dnl Enable background threads if possible. 
+ +if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" ; then + AC_DEFINE([JEMALLOC_BACKGROUND_THREAD]) +fi + +dnl ============================================================================ +dnl Check for glibc malloc hooks + +JE_COMPILABLE([glibc malloc hook], [ +#include + +extern void (* __free_hook)(void *ptr); +extern void *(* __malloc_hook)(size_t size); +extern void *(* __realloc_hook)(void *ptr, size_t size); +], [ + void *ptr = 0L; + if (__malloc_hook) ptr = __malloc_hook(1); + if (__realloc_hook) ptr = __realloc_hook(ptr, 2); + if (__free_hook && ptr) __free_hook(ptr); +], [je_cv_glibc_malloc_hook]) +if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then + if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) + wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" + fi +fi + +JE_COMPILABLE([glibc memalign hook], [ +#include + +extern void *(* __memalign_hook)(size_t alignment, size_t size); +], [ + void *ptr = 0L; + if (__memalign_hook) ptr = __memalign_hook(16, 7); +], [je_cv_glibc_memalign_hook]) +if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then + if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) + wrap_syms="${wrap_syms} __memalign_hook" + fi +fi + +JE_COMPILABLE([pthreads adaptive mutexes], [ +#include +], [ + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); + pthread_mutexattr_destroy(&attr); +], [je_cv_pthread_mutex_adaptive_np]) +if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) +fi + +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-D_GNU_SOURCE]) +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([strerror_r returns char with gnu source], [ +#include +#include +#include +#include +], [ + char *buffer = (char *) malloc(100); + char *error = strerror_r(EINVAL, buffer, 100); + printf("%s\n", error); +], [je_cv_strerror_r_returns_char_with_gnu_source]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then + AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ]) +fi + +dnl ============================================================================ +dnl Check for typedefs, structures, and compiler characteristics. +AC_HEADER_STDBOOL + +dnl ============================================================================ +dnl Define commands that generate output files. + +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ + f="${objroot}include/jemalloc/internal/public_symbols.txt" + mkdir -p "${objroot}include/jemalloc/internal" + cp /dev/null "${f}" + for nm in `echo ${mangling_map} |tr ',' ' '` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` + m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` + echo "${n}:${m}" >> "${f}" + dnl Remove name from public_syms so that it isn't redefined later. 
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` + done + for sym in ${public_syms} ; do + n="${sym}" + m="${JEMALLOC_PREFIX}${sym}" + echo "${n}:${m}" >> "${f}" + done +], [ + srcdir="${srcdir}" + objroot="${objroot}" + mangling_map="${mangling_map}" + public_syms="${public_syms}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [ + f="${objroot}include/jemalloc/internal/private_symbols.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [ + f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" +], [ + srcdir="${srcdir}" + 
objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + install_suffix="${install_suffix}" +]) + +dnl Process .in files. +AC_SUBST([cfghdrs_in]) +AC_SUBST([cfghdrs_out]) +AC_CONFIG_HEADERS([$cfghdrs_tup]) + +dnl ============================================================================ +dnl Generate outputs. + +AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof]) +AC_SUBST([cfgoutputs_in]) +AC_SUBST([cfgoutputs_out]) +AC_OUTPUT + +dnl ============================================================================ +dnl Print out the results of configuration. +AC_MSG_RESULT([===============================================================================]) +AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) +AC_MSG_RESULT([library revision : ${rev}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([CONFIG : ${CONFIG}]) +AC_MSG_RESULT([CC : ${CC}]) +AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}]) +AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}]) +AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}]) +AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) +AC_MSG_RESULT([CXX : ${CXX}]) +AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}]) +AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}]) +AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}]) +AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) +AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) +AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}]) +AC_MSG_RESULT([LIBS : ${LIBS}]) +AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) +AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([PREFIX : ${PREFIX}]) +AC_MSG_RESULT([BINDIR : ${BINDIR}]) +AC_MSG_RESULT([DATADIR : ${DATADIR}]) +AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) +AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) +AC_MSG_RESULT([MANDIR : ${MANDIR}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([srcroot : ${srcroot}]) +AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) +AC_MSG_RESULT([objroot : ${objroot}]) +AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) +AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) +AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) +AC_MSG_RESULT([install_suffix : ${install_suffix}]) +AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}]) +AC_MSG_RESULT([documentation : ${enable_doc}]) +AC_MSG_RESULT([shared libs : ${enable_shared}]) +AC_MSG_RESULT([static libs : ${enable_static}]) +AC_MSG_RESULT([autogen : ${enable_autogen}]) +AC_MSG_RESULT([debug : ${enable_debug}]) +AC_MSG_RESULT([stats : ${enable_stats}]) +AC_MSG_RESULT([experimetal_smallocx : ${enable_experimental_smallocx}]) +AC_MSG_RESULT([prof : ${enable_prof}]) +AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) +AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) +AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) +AC_MSG_RESULT([fill : ${enable_fill}]) +AC_MSG_RESULT([utrace : ${enable_utrace}]) +AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) +AC_MSG_RESULT([log : ${enable_log}]) +AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) +AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) +AC_MSG_RESULT([cxx : ${enable_cxx}]) +AC_MSG_RESULT([===============================================================================]) diff --git 
a/deps/jemalloc/doc/html.xsl.in b/deps/jemalloc/doc/html.xsl.in new file mode 100644 index 0000000..ec4fa65 --- /dev/null +++ b/deps/jemalloc/doc/html.xsl.in @@ -0,0 +1,5 @@ + + + + + diff --git a/deps/jemalloc/doc/jemalloc.xml.in b/deps/jemalloc/doc/jemalloc.xml.in new file mode 100644 index 0000000..7fecda7 --- /dev/null +++ b/deps/jemalloc/doc/jemalloc.xml.in @@ -0,0 +1,3513 @@ + + + + + + + User Manual + jemalloc + @jemalloc_version@ + + + Jason + Evans + Author + + + + + JEMALLOC + 3 + + + jemalloc + jemalloc + + general purpose memory allocation functions + + + LIBRARY + This manual describes jemalloc @jemalloc_version@. More information + can be found at the jemalloc website. + + + SYNOPSIS + + #include <jemalloc/jemalloc.h> + + Standard API + + void *malloc + size_t size + + + void *calloc + size_t number + size_t size + + + int posix_memalign + void **ptr + size_t alignment + size_t size + + + void *aligned_alloc + size_t alignment + size_t size + + + void *realloc + void *ptr + size_t size + + + void free + void *ptr + + + + Non-standard API + + void *mallocx + size_t size + int flags + + + void *rallocx + void *ptr + size_t size + int flags + + + size_t xallocx + void *ptr + size_t size + size_t extra + int flags + + + size_t sallocx + void *ptr + int flags + + + void dallocx + void *ptr + int flags + + + void sdallocx + void *ptr + size_t size + int flags + + + size_t nallocx + size_t size + int flags + + + int mallctl + const char *name + void *oldp + size_t *oldlenp + void *newp + size_t newlen + + + int mallctlnametomib + const char *name + size_t *mibp + size_t *miblenp + + + int mallctlbymib + const size_t *mib + size_t miblen + void *oldp + size_t *oldlenp + void *newp + size_t newlen + + + void malloc_stats_print + void (*write_cb) + void *, const char * + + void *cbopaque + const char *opts + + + size_t malloc_usable_size + const void *ptr + + + void (*malloc_message) + void *cbopaque + const char *s + + const char *malloc_conf; + + + + + DESCRIPTION + + Standard API + + The malloc() function allocates + size bytes of uninitialized memory. The allocated + space is suitably aligned (after possible pointer coercion) for storage + of any type of object. + + The calloc() function allocates + space for number objects, each + size bytes in length. The result is identical to + calling malloc() with an argument of + number * size, with the + exception that the allocated memory is explicitly initialized to zero + bytes. + + The posix_memalign() function + allocates size bytes of memory such that the + allocation's base address is a multiple of + alignment, and returns the allocation in the value + pointed to by ptr. The requested + alignment must be a power of 2 at least as large as + sizeof(void *). + + The aligned_alloc() function + allocates size bytes of memory such that the + allocation's base address is a multiple of + alignment. The requested + alignment must be a power of 2. Behavior is + undefined if size is not an integral multiple of + alignment. + + The realloc() function changes the + size of the previously allocated memory referenced by + ptr to size bytes. The + contents of the memory are unchanged up to the lesser of the new and old + sizes. If the new size is larger, the contents of the newly allocated + portion of the memory are undefined. Upon success, the memory referenced + by ptr is freed and a pointer to the newly + allocated memory is returned. Note that + realloc() may move the memory allocation, + resulting in a different return value than ptr. 
+ If ptr is NULL, the + realloc() function behaves identically to + malloc() for the specified size. + + The free() function causes the + allocated memory referenced by ptr to be made + available for future allocations. If ptr is + NULL, no action occurs. + + + Non-standard API + The mallocx(), + rallocx(), + xallocx(), + sallocx(), + dallocx(), + sdallocx(), and + nallocx() functions all have a + flags argument that can be used to specify + options. The functions only check the options that are contextually + relevant. Use bitwise or (|) operations to + specify one or more of the following: + + + MALLOCX_LG_ALIGN(la) + + + Align the memory allocation to start at an address + that is a multiple of (1 << + la). This macro does not validate + that la is within the valid + range. + + + MALLOCX_ALIGN(a) + + + Align the memory allocation to start at an address + that is a multiple of a, where + a is a power of two. This macro does not + validate that a is a power of 2. + + + + MALLOCX_ZERO + + Initialize newly allocated memory to contain zero + bytes. In the growing reallocation case, the real size prior to + reallocation defines the boundary between untouched bytes and those + that are initialized to contain zero bytes. If this macro is + absent, newly allocated memory is uninitialized. + + + MALLOCX_TCACHE(tc) + + + Use the thread-specific cache (tcache) specified by + the identifier tc, which must have been + acquired via the tcache.create + mallctl. This macro does not validate that + tc specifies a valid + identifier. + + + MALLOCX_TCACHE_NONE + + Do not use a thread-specific cache (tcache). Unless + MALLOCX_TCACHE(tc) or + MALLOCX_TCACHE_NONE is specified, an + automatically managed tcache will be used under many circumstances. + This macro cannot be used in the same flags + argument as + MALLOCX_TCACHE(tc). + + + MALLOCX_ARENA(a) + + + Use the arena specified by the index + a. This macro has no effect for regions that + were allocated via an arena other than the one specified. This + macro does not validate that a specifies an + arena index in the valid range. + + + + + The mallocx() function allocates at + least size bytes of memory, and returns a pointer + to the base address of the allocation. Behavior is undefined if + size is 0. + + The rallocx() function resizes the + allocation at ptr to be at least + size bytes, and returns a pointer to the base + address of the resulting allocation, which may or may not have moved from + its original location. Behavior is undefined if + size is 0. + + The xallocx() function resizes the + allocation at ptr in place to be at least + size bytes, and returns the real size of the + allocation. If extra is non-zero, an attempt is + made to resize the allocation to be at least (size + + extra) bytes, though inability to allocate + the extra byte(s) will not by itself result in failure to resize. + Behavior is undefined if size is + 0, or if (size + extra + > SIZE_T_MAX). + + The sallocx() function returns the + real size of the allocation at ptr. + + The dallocx() function causes the + memory referenced by ptr to be made available for + future allocations. + + The sdallocx() function is an + extension of dallocx() with a + size parameter to allow the caller to pass in the + allocation size as an optimization. The minimum valid input size is the + original requested size of the allocation, and the maximum valid input + size is the corresponding value returned by + nallocx() or + sallocx(). 
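To make the flag semantics above concrete, here is a minimal sketch (not taken from this manual) of the non-standard allocation path: it obtains a zeroed, 64-byte-aligned buffer with mallocx(), grows it with rallocx(), and releases it with sized deallocation via sdallocx(). It assumes an unprefixed jemalloc build, so the functions carry exactly the names shown in the SYNOPSIS.

#include <jemalloc/jemalloc.h>

int main(void) {
	/* 4 KiB, zero-filled, aligned to a 64-byte boundary. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL)
		return 1;

	/* Grow to 8 KiB; rallocx() may move the allocation, so reassign. */
	void *q = rallocx(p, 8192, MALLOCX_ALIGN(64));
	if (q == NULL) {
		dallocx(p, 0);
		return 1;
	}

	/* Sized deallocation: 8192 was the requested size, so it is a valid input. */
	sdallocx(q, 8192, 0);
	return 0;
}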
+ + The nallocx() function allocates no + memory, but it performs the same size computation as the + mallocx() function, and returns the real + size of the allocation that would result from the equivalent + mallocx() function call, or + 0 if the inputs exceed the maximum supported size + class and/or alignment. Behavior is undefined if + size is 0. + + The mallctl() function provides a + general interface for introspecting the memory allocator, as well as + setting modifiable parameters and triggering actions. The + period-separated name argument specifies a + location in a tree-structured namespace; see the section for + documentation on the tree contents. To read a value, pass a pointer via + oldp to adequate space to contain the value, and a + pointer to its length via oldlenp; otherwise pass + NULL and NULL. Similarly, to + write a value, pass a pointer to the value via + newp, and its length via + newlen; otherwise pass NULL + and 0. + + The mallctlnametomib() function + provides a way to avoid repeated name lookups for applications that + repeatedly query the same portion of the namespace, by translating a name + to a Management Information Base (MIB) that can be passed + repeatedly to mallctlbymib(). Upon + successful return from mallctlnametomib(), + mibp contains an array of + *miblenp integers, where + *miblenp is the lesser of the number of components + in name and the input value of + *miblenp. Thus it is possible to pass a + *miblenp that is smaller than the number of + period-separated name components, which results in a partial MIB that can + be used as the basis for constructing a complete MIB. For name + components that are integers (e.g. the 2 in + arenas.bin.2.size), + the corresponding MIB component will always be that integer. Therefore, + it is legitimate to construct code like the following: + + + + The malloc_stats_print() function writes + summary statistics via the write_cb callback + function pointer and cbopaque data passed to + write_cb, or malloc_message() + if write_cb is NULL. The + statistics are presented in human-readable form unless J is + specified as a character within the opts string, in + which case the statistics are presented in JSON format. This function can be + called repeatedly. General information that never changes during + execution can be omitted by specifying g as a character + within the opts string. Note that + malloc_stats_print() uses the + mallctl*() functions internally, so inconsistent + statistics can be reported if multiple threads use these functions + simultaneously. If is specified during + configuration, m, d, and a + can be specified to omit merged arena, destroyed merged arena, and per + arena statistics, respectively; b and l can + be specified to omit per size class statistics for bins and large objects, + respectively; x can be specified to omit all mutex + statistics; e can be used to omit extent statistics. + Unrecognized characters are silently ignored. Note that thread caching + may prevent some statistics from being completely up to date, since extra + locking would be required to merge counters that track thread cache + operations. + + The malloc_usable_size() function + returns the usable size of the allocation pointed to by + ptr. The return value may be larger than the size + that was requested during allocation. The + malloc_usable_size() function is not a + mechanism for in-place realloc(); rather + it is provided solely as a tool for introspection purposes. 
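As a concrete illustration of the MIB-reuse pattern described for mallctlnametomib() and mallctlbymib(), the following sketch (an assumed example, not the manual's own listing) translates arenas.bin.0.size once and then overwrites only the integer component to walk every bin size class; arenas.nbins and arenas.bin.<i>.size are documented in the MALLCTL NAMESPACE section below.

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
	unsigned nbins, i;
	size_t mib[4];
	size_t len, miblen;

	len = sizeof(nbins);
	if (mallctl("arenas.nbins", &nbins, &len, NULL, 0) != 0)
		return 1;

	/* Translate the name to a MIB once... */
	miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0)
		return 1;

	/* ...then reuse it, substituting only the integer component. */
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i;
		len = sizeof(bin_size);
		if (mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0) != 0)
			return 1;
		printf("bin %u: %zu bytes\n", i, bin_size);
	}
	return 0;
}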
Any + discrepancy between the requested allocation size and the size reported + by malloc_usable_size() should not be + depended on, since such behavior is entirely implementation-dependent. + + + + + TUNING + Once, when the first call is made to one of the memory allocation + routines, the allocator initializes its internals based in part on various + options that can be specified at compile- or run-time. + + The string specified via , the + string pointed to by the global variable malloc_conf, the + name of the file referenced by the symbolic link named + /etc/malloc.conf, and the value of the + environment variable MALLOC_CONF, will be interpreted, in + that order, from left to right as options. Note that + malloc_conf may be read before + main() is entered, so the declaration of + malloc_conf should specify an initializer that contains + the final value to be read by jemalloc. + and malloc_conf are compile-time mechanisms, whereas + /etc/malloc.conf and + MALLOC_CONF can be safely set any time prior to program + invocation. + + An options string is a comma-separated list of option:value pairs. + There is one key corresponding to each opt.* mallctl (see the section for options + documentation). For example, abort:true,narenas:1 sets + the opt.abort and opt.narenas options. Some + options have boolean values (true/false), others have integer values (base + 8, 10, or 16, depending on prefix), and yet others have raw string + values. + + + IMPLEMENTATION NOTES + Traditionally, allocators have used + sbrk + 2 to obtain memory, which is + suboptimal for several reasons, including race conditions, increased + fragmentation, and artificial limitations on maximum usable memory. If + sbrk + 2 is supported by the operating + system, this allocator uses both + mmap + 2 and + sbrk + 2, in that order of preference; + otherwise only mmap + 2 is used. + + This allocator uses multiple arenas in order to reduce lock + contention for threaded programs on multi-processor systems. This works + well with regard to threading scalability, but incurs some costs. There is + a small fixed per-arena overhead, and additionally, arenas manage memory + completely independently of each other, which means a small fixed increase + in overall memory fragmentation. These overheads are not generally an + issue, given the number of arenas normally used. Note that using + substantially more arenas than the default is not likely to improve + performance, mainly due to reduced cache performance. However, it may make + sense to reduce the number of arenas if an application does not make much + use of the allocation functions. + + In addition to multiple arenas, this allocator supports + thread-specific caching, in order to make it possible to completely avoid + synchronization for most allocation requests. Such caching allows very fast + allocation in the common case, but it increases memory usage and + fragmentation, since a bounded number of objects can remain allocated in + each thread cache. + + Memory is conceptually broken into extents. Extents are always + aligned to multiples of the page size. This alignment makes it possible to + find metadata for user objects quickly. User objects are broken into two + categories according to size: small and large. Contiguous small objects + comprise a slab, which resides within a single extent, whereas large objects + each have their own extents backing them. + + Small objects are managed in groups by slabs. Each slab maintains + a bitmap to track which regions are in use. 
Allocation requests that are no + more than half the quantum (8 or 16, depending on architecture) are rounded + up to the nearest power of two that is at least sizeof(double). All other object size + classes are multiples of the quantum, spaced such that there are four size + classes for each doubling in size, which limits internal fragmentation to + approximately 20% for all but the smallest size classes. Small size classes + are smaller than four times the page size, and large size classes extend + from four times the page size up to the largest size class that does not + exceed PTRDIFF_MAX. + + Allocations are packed tightly together, which can be an issue for + multi-threaded applications. If you need to assure that allocations do not + suffer from cacheline sharing, round your allocation requests up to the + nearest multiple of the cacheline size, or specify cacheline alignment when + allocating. + + The realloc(), + rallocx(), and + xallocx() functions may resize allocations + without moving them under limited circumstances. Unlike the + *allocx() API, the standard API does not + officially round up the usable size of an allocation to the nearest size + class, so technically it is necessary to call + realloc() to grow e.g. a 9-byte allocation to + 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage + trivially succeeds in place as long as the pre-size and post-size both round + up to the same size class. No other API guarantees are made regarding + in-place resizing, but the current implementation also tries to resize large + allocations in place, as long as the pre-size and post-size are both large. + For shrinkage to succeed, the extent allocator must support splitting (see + arena.<i>.extent_hooks). + Growth only succeeds if the trailing memory is currently available, and the + extent allocator supports merging. + + Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the + size classes in each category are as shown in . + + + Size classes + + + + + + + Category + Spacing + Size + + + + + Small + lg + [8] + + + 16 + [16, 32, 48, 64, 80, 96, 112, 128] + + + 32 + [160, 192, 224, 256] + + + 64 + [320, 384, 448, 512] + + + 128 + [640, 768, 896, 1024] + + + 256 + [1280, 1536, 1792, 2048] + + + 512 + [2560, 3072, 3584, 4096] + + + 1 KiB + [5 KiB, 6 KiB, 7 KiB, 8 KiB] + + + 2 KiB + [10 KiB, 12 KiB, 14 KiB] + + + Large + 2 KiB + [16 KiB] + + + 4 KiB + [20 KiB, 24 KiB, 28 KiB, 32 KiB] + + + 8 KiB + [40 KiB, 48 KiB, 54 KiB, 64 KiB] + + + 16 KiB + [80 KiB, 96 KiB, 112 KiB, 128 KiB] + + + 32 KiB + [160 KiB, 192 KiB, 224 KiB, 256 KiB] + + + 64 KiB + [320 KiB, 384 KiB, 448 KiB, 512 KiB] + + + 128 KiB + [640 KiB, 768 KiB, 896 KiB, 1 MiB] + + + 256 KiB + [1280 KiB, 1536 KiB, 1792 KiB, 2 MiB] + + + 512 KiB + [2560 KiB, 3 MiB, 3584 KiB, 4 MiB] + + + 1 MiB + [5 MiB, 6 MiB, 7 MiB, 8 MiB] + + + 2 MiB + [10 MiB, 12 MiB, 14 MiB, 16 MiB] + + + 4 MiB + [20 MiB, 24 MiB, 28 MiB, 32 MiB] + + + 8 MiB + [40 MiB, 48 MiB, 56 MiB, 64 MiB] + + + ... + ... + + + 512 PiB + [2560 PiB, 3 EiB, 3584 PiB, 4 EiB] + + + 1 EiB + [5 EiB, 6 EiB, 7 EiB] + + + +
+
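Tying the TUNING section above to practice, a minimal sketch (assumed example): options can be baked in through the malloc_conf global declared in the SYNOPSIS, or supplied per run through the MALLOC_CONF environment variable; the keys drop the opt. prefix, so opt.background_thread becomes background_thread. The option names used here (background_thread, dirty_decay_ms, stats_print) are all documented in the opt.* entries that follow.

/* Compile-time defaults: this string is read before main() is entered. */
#include <jemalloc/jemalloc.h>

const char *malloc_conf = "background_thread:true,dirty_decay_ms:5000";

int main(void) {
	return 0;
}

The same keys can be set at launch without recompiling (program name is a placeholder):

MALLOC_CONF="stats_print:true" ./my_program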
+ + MALLCTL NAMESPACE + The following names are defined in the namespace accessible via the + mallctl*() functions. Value types are specified in + parentheses, their readable/writable statuses are encoded as + rw, r-, -w, or + --, and required build configuration flags follow, if + any. A name element encoded as <i> or + <j> indicates an integer component, where the + integer varies from 0 to some upper value that must be determined via + introspection. In the case of stats.arenas.<i>.* + and arena.<i>.{initialized,purge,decay,dss}, + <i> equal to + MALLCTL_ARENAS_ALL can be used to operate on all arenas + or access the summation of statistics from all arenas; similarly + <i> equal to + MALLCTL_ARENAS_DESTROYED can be used to access the + summation of statistics from all destroyed arenas. These constants can be + utilized either via mallctlnametomib() followed by + mallctlbymib(), or via code such as the following: + + Take special note of the epoch mallctl, which controls + refreshing of cached dynamic statistics. + + + + + version + (const char *) + r- + + Return the jemalloc version string. + + + + + epoch + (uint64_t) + rw + + If a value is passed in, refresh the data from which + the mallctl*() functions report values, + and increment the epoch. Return the current epoch. This is useful for + detecting whether another thread caused a refresh. + + + + + background_thread + (bool) + rw + + Enable/disable internal background worker threads. When + set to true, background threads are created on demand (the number of + background threads will be no more than the number of CPUs or active + arenas). Threads run periodically, and handle purging asynchronously. When switching + off, background threads are terminated synchronously. Note that after + fork2 + function, the state in the child process will be disabled regardless + the state in parent process. See stats.background_thread + for related stats. opt.background_thread + can be used to set the default option. This option is only available on + selected pthread-based platforms. + + + + + max_background_threads + (size_t) + rw + + Maximum number of background worker threads that will + be created. This value is capped at opt.max_background_threads at + startup. + + + + + config.cache_oblivious + (bool) + r- + + was specified + during build configuration. + + + + + config.debug + (bool) + r- + + was specified during + build configuration. + + + + + config.fill + (bool) + r- + + was specified during + build configuration. + + + + + config.lazy_lock + (bool) + r- + + was specified + during build configuration. + + + + + config.malloc_conf + (const char *) + r- + + Embedded configure-time-specified run-time options + string, empty unless was specified + during build configuration. + + + + + config.prof + (bool) + r- + + was specified during + build configuration. + + + + + config.prof_libgcc + (bool) + r- + + was not + specified during build configuration. + + + + + config.prof_libunwind + (bool) + r- + + was specified + during build configuration. + + + + + config.stats + (bool) + r- + + was specified during + build configuration. + + + + + + config.utrace + (bool) + r- + + was specified during + build configuration. + + + + + config.xmalloc + (bool) + r- + + was specified during + build configuration. + + + + + opt.abort + (bool) + r- + + Abort-on-warning enabled/disabled. If true, most + warnings are fatal. Note that runtime option warnings are not included + (see opt.abort_conf for + that). The process will call + abort + 3 in these cases. 
This option is + disabled by default unless is + specified during configuration, in which case it is enabled by default. + + + + + + opt.confirm_conf + (bool) + r- + + Confirm-runtime-options-when-program-starts + enabled/disabled. If true, the string specified via + , the string pointed to by the + global variable malloc_conf, the name + of the file referenced by the symbolic link named + /etc/malloc.conf, and the value of + the environment variable MALLOC_CONF, will be printed in + order. Then, each option being set will be individually printed. This + option is disabled by default. + + + + + opt.abort_conf + (bool) + r- + + Abort-on-invalid-configuration enabled/disabled. If + true, invalid runtime options are fatal. The process will call + abort + 3 in these cases. This option is + disabled by default unless is + specified during configuration, in which case it is enabled by default. + + + + + + opt.metadata_thp + (const char *) + r- + + Controls whether to allow jemalloc to use transparent + huge page (THP) for internal metadata (see stats.metadata). always + allows such usage. auto uses no THP initially, but may + begin to do so when metadata usage reaches certain level. The default + is disabled. + + + + + opt.retain + (bool) + r- + + If true, retain unused virtual memory for later reuse + rather than discarding it by calling + munmap + 2 or equivalent (see stats.retained for related details). + It also makes jemalloc use + mmap2 + or equivalent in a more greedy way, mapping larger + chunks in one go. This option is disabled by default unless discarding + virtual memory is known to trigger platform-specific performance + problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual + memory allocation algorithm that causes semi-permanent VM map holes + under normal jemalloc operation; and 2) for [64-bit] Windows, which + disallows split / merged regions with + MEM_RELEASE. Although the + same issues may present on 32-bit platforms as well, retaining virtual + memory for 32-bit Linux and Windows is disabled by default due to the + practical possibility of address space exhaustion. + + + + + opt.dss + (const char *) + r- + + dss (sbrk + 2) allocation precedence as + related to mmap + 2 allocation. The following + settings are supported if + sbrk + 2 is supported by the operating + system: disabled, primary, and + secondary; otherwise only disabled is + supported. The default is secondary if + sbrk + 2 is supported by the operating + system; disabled otherwise. + + + + + + opt.narenas + (unsigned) + r- + + Maximum number of arenas to use for automatic + multiplexing of threads and arenas. The default is four times the + number of CPUs, or one if there is a single CPU. + + + + + opt.oversize_threshold + (size_t) + r- + + The threshold in bytes of which requests are considered + oversize. Allocation requests with greater sizes are fulfilled from a + dedicated arena (automatically managed, however not within + narenas), in order to reduce fragmentation by not + mixing huge allocations with small ones. In addition, the decay API + guarantees on the extents greater than the specified threshold may be + overridden. Note that requests with arena index specified via + MALLOCX_ARENA, or threads associated with explicit + arenas will not be considered. The default threshold is 8MiB. Values + not within large size classes disables this feature. + + + + + opt.percpu_arena + (const char *) + r- + + Per CPU arena mode. 
Use the percpu + setting to enable this feature, which uses number of CPUs to determine + number of arenas, and bind threads to arenas dynamically based on the + CPU the thread runs on currently. phycpu setting uses + one arena per physical CPU, which means the two hyper threads on the + same CPU share one arena. Note that no runtime checking regarding the + availability of hyper threading is done at the moment. When set to + disabled, narenas and thread to arena association will + not be impacted by this option. The default is disabled. + + + + + + opt.background_thread + (bool) + r- + + Internal background worker threads enabled/disabled. + Because of potential circular dependencies, enabling background thread + using this option may cause crash or deadlock during initialization. For + a reliable way to use this feature, see background_thread for dynamic control + options and details. This option is disabled by + default. + + + + + opt.max_background_threads + (size_t) + r- + + Maximum number of background threads that will be created + if background_thread is set. + Defaults to number of cpus. + + + + + opt.dirty_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused dirty pages until an equivalent set of unused dirty pages + is purged (i.e. converted to muzzy via e.g. + madvise(...MADV_FREE) + if supported by the operating system, or converted to clean otherwise) + and/or reused. Dirty pages are defined as previously having been + potentially written to by the application, and therefore consuming + physical memory, yet having no current use. The pages are incrementally + purged according to a sigmoidal decay curve that starts and ends with + zero purge rate. A decay time of 0 causes all unused dirty pages to be + purged immediately upon creation. A decay time of -1 disables purging. + The default decay time is 10 seconds. See arenas.dirty_decay_ms + and arena.<i>.dirty_decay_ms + for related dynamic control options. See opt.muzzy_decay_ms + for a description of muzzy pages.for a description of muzzy pages. Note + that when the oversize_threshold + feature is enabled, the arenas reserved for oversize requests may have + its own default decay settings. + + + + + opt.muzzy_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused muzzy pages until an equivalent set of unused muzzy pages + is purged (i.e. converted to clean) and/or reused. Muzzy pages are + defined as previously having been unused dirty pages that were + subsequently purged in a manner that left them subject to the + reclamation whims of the operating system (e.g. + madvise(...MADV_FREE)), + and therefore in an indeterminate state. The pages are incrementally + purged according to a sigmoidal decay curve that starts and ends with + zero purge rate. A decay time of 0 causes all unused muzzy pages to be + purged immediately upon creation. A decay time of -1 disables purging. + The default decay time is 10 seconds. See arenas.muzzy_decay_ms + and arena.<i>.muzzy_decay_ms + for related dynamic control options. + + + + + opt.lg_extent_max_active_fit + (size_t) + r- + + When reusing dirty extents, this determines the (log + base 2 of the) maximum ratio between the size of the active extent + selected (to split off from) and the size of the requested allocation. + This prevents the splitting of large active extents for smaller + allocations, which can reduce fragmentation over the long run + (especially for non-active extents). 
Lower value may reduce + fragmentation, at the cost of extra active extents. The default value + is 6, which gives a maximum ratio of 64 (2^6). + + + + + opt.stats_print + (bool) + r- + + Enable/disable statistics printing at exit. If + enabled, the malloc_stats_print() + function is called at program exit via an + atexit + 3 function. opt.stats_print_opts + can be combined to specify output options. If + is specified during configuration, this + has the potential to cause deadlock for a multi-threaded process that + exits while one or more threads are executing in the memory allocation + functions. Furthermore, atexit() may + allocate memory during application initialization and then deadlock + internally when jemalloc in turn calls + atexit(), so this option is not + universally usable (though the application can register its own + atexit() function with equivalent + functionality). Therefore, this option should only be used with care; + it is primarily intended as a performance tuning aid during application + development. This option is disabled by default. + + + + + opt.stats_print_opts + (const char *) + r- + + Options (the opts string) to pass + to the malloc_stats_print() at exit (enabled + through opt.stats_print). See + available options in malloc_stats_print(). + Has no effect unless opt.stats_print is + enabled. The default is . + + + + + opt.junk + (const char *) + r- + [] + + Junk filling. If set to alloc, each byte + of uninitialized allocated memory will be initialized to + 0xa5. If set to free, all deallocated + memory will be initialized to 0x5a. If set to + true, both allocated and deallocated memory will be + initialized, and if set to false, junk filling be + disabled entirely. This is intended for debugging and will impact + performance negatively. This option is false by default + unless is specified during + configuration, in which case it is true by + default. + + + + + opt.zero + (bool) + r- + [] + + Zero filling enabled/disabled. If enabled, each byte + of uninitialized allocated memory will be initialized to 0. Note that + this initialization only happens once for each byte, so + realloc() and + rallocx() calls do not zero memory that + was previously allocated. This is intended for debugging and will + impact performance negatively. This option is disabled by default. + + + + + + opt.utrace + (bool) + r- + [] + + Allocation tracing based on + utrace + 2 enabled/disabled. This option + is disabled by default. + + + + + opt.xmalloc + (bool) + r- + [] + + Abort-on-out-of-memory enabled/disabled. If enabled, + rather than returning failure for any allocation function, display a + diagnostic message on STDERR_FILENO and cause the + program to drop core (using + abort + 3). If an application is + designed to depend on this behavior, set the option at compile time by + including the following in the source code: + + This option is disabled by default. + + + + + opt.tcache + (bool) + r- + + Thread-specific caching (tcache) enabled/disabled. When + there are multiple threads, each thread uses a tcache for objects up to + a certain size. Thread-specific caching allows many allocations to be + satisfied without performing any thread synchronization, at the cost of + increased memory use. See the opt.lg_tcache_max + option for related tuning information. This option is enabled by + default. + + + + + opt.lg_tcache_max + (size_t) + r- + + Maximum size class (log base 2) to cache in the + thread-specific cache (tcache). 
At a minimum, all small size classes + are cached, and at a maximum all large size classes are cached. The + default maximum is 32 KiB (2^15). + + + + + opt.thp + (const char *) + r- + + Transparent hugepage (THP) mode. Settings "always", + "never" and "default" are available if THP is supported by the operating + system. The "always" setting enables transparent hugepage for all user + memory mappings with + MADV_HUGEPAGE; "never" + ensures no transparent hugepage with + MADV_NOHUGEPAGE; the default + setting "default" makes no changes. Note that: this option does not + affect THP for jemalloc internal metadata (see opt.metadata_thp); + in addition, for arenas with customized extent_hooks, + this option is bypassed as it is implemented as part of the default + extent hooks. + + + + + opt.prof + (bool) + r- + [] + + Memory profiling enabled/disabled. If enabled, profile + memory allocation activity. See the opt.prof_active + option for on-the-fly activation/deactivation. See the opt.lg_prof_sample + option for probabilistic sampling control. See the opt.prof_accum + option for control of cumulative sample reporting. See the opt.lg_prof_interval + option for information on interval-triggered profile dumping, the opt.prof_gdump + option for information on high-water-triggered profile dumping, and the + opt.prof_final + option for final profile dumping. Profile output is compatible with + the jeprof command, which is based on the + pprof that is developed as part of the gperftools + package. See HEAP PROFILE + FORMAT for heap profile format documentation. + + + + + opt.prof_prefix + (const char *) + r- + [] + + Filename prefix for profile dumps. If the prefix is + set to the empty string, no automatic dumps will occur; this is + primarily useful for disabling the automatic final heap dump (which + also disables leak reporting, if enabled). The default prefix is + jeprof. + + + + + opt.prof_active + (bool) + r- + [] + + Profiling activated/deactivated. This is a secondary + control mechanism that makes it possible to start the application with + profiling enabled (see the opt.prof option) but + inactive, then toggle profiling at any time during program execution + with the prof.active mallctl. + This option is enabled by default. + + + + + opt.prof_thread_active_init + (bool) + r- + [] + + Initial setting for thread.prof.active + in newly created threads. The initial setting for newly created threads + can also be changed during execution via the prof.thread_active_init + mallctl. This option is enabled by default. + + + + + opt.lg_prof_sample + (size_t) + r- + [] + + Average interval (log base 2) between allocation + samples, as measured in bytes of allocation activity. Increasing the + sampling interval decreases profile fidelity, but also decreases the + computational overhead. The default sample interval is 512 KiB (2^19 + B). + + + + + opt.prof_accum + (bool) + r- + [] + + Reporting of cumulative object/byte counts in profile + dumps enabled/disabled. If this option is enabled, every unique + backtrace must be stored for the duration of execution. Depending on + the application, this can impose a large memory overhead, and the + cumulative counts are not always of interest. This option is disabled + by default. + + + + + opt.lg_prof_interval + (ssize_t) + r- + [] + + Average interval (log base 2) between memory profile + dumps, as measured in bytes of allocation activity. 
The actual + interval between dumps may be sporadic because decentralized allocation + counters are used to avoid synchronization bottlenecks. Profiles are + dumped to files named according to the pattern + <prefix>.<pid>.<seq>.i<iseq>.heap, + where <prefix> is controlled by the + opt.prof_prefix + option. By default, interval-triggered profile dumping is disabled + (encoded as -1). + + + + + + opt.prof_gdump + (bool) + r- + [] + + Set the initial state of prof.gdump, which when + enabled triggers a memory profile dump every time the total virtual + memory exceeds the previous maximum. This option is disabled by + default. + + + + + opt.prof_final + (bool) + r- + [] + + Use an + atexit + 3 function to dump final memory + usage to a file named according to the pattern + <prefix>.<pid>.<seq>.f.heap, + where <prefix> is controlled by the opt.prof_prefix + option. Note that atexit() may allocate + memory during application initialization and then deadlock internally + when jemalloc in turn calls atexit(), so + this option is not universally usable (though the application can + register its own atexit() function with + equivalent functionality). This option is disabled by + default. + + + + + opt.prof_leak + (bool) + r- + [] + + Leak reporting enabled/disabled. If enabled, use an + atexit + 3 function to report memory leaks + detected by allocation sampling. See the + opt.prof option for + information on analyzing heap profile output. This option is disabled + by default. + + + + + thread.arena + (unsigned) + rw + + Get or set the arena associated with the calling + thread. If the specified arena was not initialized beforehand (see the + arena.i.initialized + mallctl), it will be automatically initialized as a side effect of + calling this interface. + + + + + thread.allocated + (uint64_t) + r- + [] + + Get the total number of bytes ever allocated by the + calling thread. This counter has the potential to wrap around; it is + up to the application to appropriately interpret the counter in such + cases. + + + + + thread.allocatedp + (uint64_t *) + r- + [] + + Get a pointer to the the value that is returned by the + thread.allocated + mallctl. This is useful for avoiding the overhead of repeated + mallctl*() calls. + + + + + thread.deallocated + (uint64_t) + r- + [] + + Get the total number of bytes ever deallocated by the + calling thread. This counter has the potential to wrap around; it is + up to the application to appropriately interpret the counter in such + cases. + + + + + thread.deallocatedp + (uint64_t *) + r- + [] + + Get a pointer to the the value that is returned by the + thread.deallocated + mallctl. This is useful for avoiding the overhead of repeated + mallctl*() calls. + + + + + thread.tcache.enabled + (bool) + rw + + Enable/disable calling thread's tcache. The tcache is + implicitly flushed as a side effect of becoming + disabled (see thread.tcache.flush). + + + + + + thread.tcache.flush + (void) + -- + + Flush calling thread's thread-specific cache (tcache). + This interface releases all cached objects and internal data structures + associated with the calling thread's tcache. Ordinarily, this interface + need not be called, since automatic periodic incremental garbage + collection occurs, and the thread cache is automatically discarded when + a thread exits. 
However, garbage collection is triggered by allocation + activity, so it is possible for a thread that stops + allocating/deallocating to retain its cache indefinitely, in which case + the developer may find manual flushing useful. + + + + + thread.prof.name + (const char *) + r- or + -w + [] + + Get/set the descriptive name associated with the calling + thread in memory profile dumps. An internal copy of the name string is + created, so the input string need not be maintained after this interface + completes execution. The output string of this interface should be + copied for non-ephemeral uses, because multiple implementation details + can cause asynchronous string deallocation. Furthermore, each + invocation of this interface can only read or write; simultaneous + read/write is not supported due to string lifetime limitations. The + name string must be nil-terminated and comprised only of characters in + the sets recognized + by isgraph + 3 and + isblank + 3. + + + + + thread.prof.active + (bool) + rw + [] + + Control whether sampling is currently active for the + calling thread. This is an activation mechanism in addition to prof.active; both must + be active for the calling thread to sample. This flag is enabled by + default. + + + + + tcache.create + (unsigned) + r- + + Create an explicit thread-specific cache (tcache) and + return an identifier that can be passed to the MALLOCX_TCACHE(tc) + macro to explicitly use the specified cache rather than the + automatically managed one that is used by default. Each explicit cache + can be used by only one thread at a time; the application must assure + that this constraint holds. + + + + + + tcache.flush + (unsigned) + -w + + Flush the specified thread-specific cache (tcache). The + same considerations apply to this interface as to thread.tcache.flush, + except that the tcache will never be automatically discarded. + + + + + + tcache.destroy + (unsigned) + -w + + Flush the specified thread-specific cache (tcache) and + make the identifier available for use during a future tcache creation. + + + + + + arena.<i>.initialized + (bool) + r- + + Get whether the specified arena's statistics are + initialized (i.e. the arena was initialized prior to the current epoch). + This interface can also be nominally used to query whether the merged + statistics corresponding to MALLCTL_ARENAS_ALL are + initialized (always true). + + + + + arena.<i>.decay + (void) + -- + + Trigger decay-based purging of unused dirty/muzzy pages + for arena <i>, or for all arenas if <i> equals + MALLCTL_ARENAS_ALL. The proportion of unused + dirty/muzzy pages to be purged depends on the current time; see opt.dirty_decay_ms + and opt.muzy_decay_ms + for details. + + + + + arena.<i>.purge + (void) + -- + + Purge all unused dirty pages for arena <i>, or for + all arenas if <i> equals MALLCTL_ARENAS_ALL. + + + + + + arena.<i>.reset + (void) + -- + + Discard all of the arena's extant allocations. This + interface can only be used with arenas explicitly created via arenas.create. None + of the arena's discarded/cached allocations may accessed afterward. As + part of this requirement, all thread caches which were used to + allocate/deallocate in conjunction with the arena must be flushed + beforehand. + + + + + arena.<i>.destroy + (void) + -- + + Destroy the arena. 
Discard all of the arena's extant + allocations using the same mechanism as for arena.<i>.reset + (with all the same constraints and side effects), merge the arena stats + into those accessible at arena index + MALLCTL_ARENAS_DESTROYED, and then completely + discard all metadata associated with the arena. Future calls to arenas.create may + recycle the arena index. Destruction will fail if any threads are + currently associated with the arena as a result of calls to thread.arena. + + + + + arena.<i>.dss + (const char *) + rw + + Set the precedence of dss allocation as related to mmap + allocation for arena <i>, or for all arenas if <i> equals + MALLCTL_ARENAS_ALL. See opt.dss for supported + settings. + + + + + arena.<i>.dirty_decay_ms + (ssize_t) + rw + + Current per-arena approximate time in milliseconds from + the creation of a set of unused dirty pages until an equivalent set of + unused dirty pages is purged and/or reused. Each time this interface is + set, all currently unused dirty pages are considered to have fully + decayed, which causes immediate purging of all unused dirty pages unless + the decay time is set to -1 (i.e. purging disabled). See opt.dirty_decay_ms + for additional information. + + + + + arena.<i>.muzzy_decay_ms + (ssize_t) + rw + + Current per-arena approximate time in milliseconds from + the creation of a set of unused muzzy pages until an equivalent set of + unused muzzy pages is purged and/or reused. Each time this interface is + set, all currently unused muzzy pages are considered to have fully + decayed, which causes immediate purging of all unused muzzy pages unless + the decay time is set to -1 (i.e. purging disabled). See opt.muzzy_decay_ms + for additional information. + + + + + arena.<i>.retain_grow_limit + (size_t) + rw + + Maximum size to grow retained region (only relevant when + opt.retain is + enabled). This controls the maximum increment to expand virtual memory, + or allocation through arena.<i>extent_hooks. + In particular, if customized extent hooks reserve physical memory + (e.g. 1G huge pages), this is useful to control the allocation hook's + input size. The default is no limit. + + + + + arena.<i>.extent_hooks + (extent_hooks_t *) + rw + + Get or set the extent management hook functions for + arena <i>. The functions must be capable of operating on all + extant extents associated with arena <i>, usually by passing + unknown extents to the replaced functions. In practice, it is feasible + to control allocation for arenas explicitly created via arenas.create such + that all extents originate from an application-supplied extent allocator + (by specifying the custom extent hook functions during arena creation). + However, the API guarantees for the automatically created arenas may be + relaxed -- hooks set there may be called in a "best effort" fashion; in + addition there may be extents created prior to the application having an + opportunity to take over extent allocation. + + + The extent_hooks_t structure comprises function + pointers which are described individually below. jemalloc uses these + functions to manage extent lifetime, which starts off with allocation of + mapped committed memory, in the simplest case followed by deallocation. + However, there are performance and platform reasons to retain extents + for later reuse. 
Cleanup attempts cascade from deallocation to decommit + to forced purging to lazy purging, which gives the extent management + functions opportunities to reject the most permanent cleanup operations + in favor of less permanent (and often less costly) operations. All + operations except allocation can be universally opted out of by setting + the hook pointers to NULL, or selectively opted out + of by returning failure. Note that once the extent hook is set, the + structure is accessed directly by the associated arenas, so it must + remain valid for the entire lifetime of the arenas. + + + typedef void *(extent_alloc_t) + extent_hooks_t *extent_hooks + void *new_addr + size_t size + size_t alignment + bool *zero + bool *commit + unsigned arena_ind + + + An extent allocation function conforms to the + extent_alloc_t type and upon success returns a pointer to + size bytes of mapped memory on behalf of arena + arena_ind such that the extent's base address is + a multiple of alignment, as well as setting + *zero to indicate whether the extent is zeroed + and *commit to indicate whether the extent is + committed. Upon error the function returns NULL + and leaves *zero and + *commit unmodified. The + size parameter is always a multiple of the page + size. The alignment parameter is always a power + of two at least as large as the page size. Zeroing is mandatory if + *zero is true upon function entry. Committing is + mandatory if *commit is true upon function entry. + If new_addr is not NULL, the + returned pointer must be new_addr on success or + NULL on error. Committed memory may be committed + in absolute terms as on a system that does not overcommit, or in + implicit terms as on a system that overcommits and satisfies physical + memory needs on demand via soft page faults. Note that replacing the + default extent allocation function makes the arena's arena.<i>.dss + setting irrelevant. + + + typedef bool (extent_dalloc_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + bool committed + unsigned arena_ind + + + + An extent deallocation function conforms to the + extent_dalloc_t type and deallocates an extent at given + addr and size with + committed/decommited memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates opt-out from + deallocation; the virtual memory mapping associated with the extent + remains mapped, in the same commit state, and available for future use, + in which case it will be automatically retained for later reuse. + + + typedef void (extent_destroy_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + bool committed + unsigned arena_ind + + + + An extent destruction function conforms to the + extent_destroy_t type and unconditionally destroys an + extent at given addr and + size with + committed/decommited memory as indicated, on + behalf of arena arena_ind. This function may be + called to destroy retained extents during arena destruction (see arena.<i>.destroy). + + + typedef bool (extent_commit_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + size_t offset + size_t length + unsigned arena_ind + + + An extent commit function conforms to the + extent_commit_t type and commits zeroed physical memory to + back pages within an extent at given addr and + size at offset bytes, + extending for length on behalf of arena + arena_ind, returning false upon success. 
+ Committed memory may be committed in absolute terms as on a system that + does not overcommit, or in implicit terms as on a system that + overcommits and satisfies physical memory needs on demand via soft page + faults. If the function returns true, this indicates insufficient + physical memory to satisfy the request. + + + typedef bool (extent_decommit_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + size_t offset + size_t length + unsigned arena_ind + + + An extent decommit function conforms to the + extent_decommit_t type and decommits any physical memory + that is backing pages within an extent at given + addr and size at + offset bytes, extending for + length on behalf of arena + arena_ind, returning false upon success, in which + case the pages will be committed via the extent commit function before + being reused. If the function returns true, this indicates opt-out from + decommit; the memory remains committed and available for future use, in + which case it will be automatically retained for later reuse. + + + typedef bool (extent_purge_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + size_t offset + size_t length + unsigned arena_ind + + + An extent purge function conforms to the + extent_purge_t type and discards physical pages + within the virtual memory mapping associated with an extent at given + addr and size at + offset bytes, extending for + length on behalf of arena + arena_ind. A lazy extent purge function (e.g. + implemented via + madvise(...MADV_FREE)) + can delay purging indefinitely and leave the pages within the purged + virtual memory range in an indeterminite state, whereas a forced extent + purge function immediately purges, and the pages within the virtual + memory range will be zero-filled the next time they are accessed. If + the function returns true, this indicates failure to purge. + + + typedef bool (extent_split_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + size_t size_a + size_t size_b + bool committed + unsigned arena_ind + + + An extent split function conforms to the + extent_split_t type and optionally splits an extent at + given addr and size into + two adjacent extents, the first of size_a bytes, + and the second of size_b bytes, operating on + committed/decommitted memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates that the extent + remains unsplit and therefore should continue to be operated on as a + whole. + + + typedef bool (extent_merge_t) + extent_hooks_t *extent_hooks + void *addr_a + size_t size_a + void *addr_b + size_t size_b + bool committed + unsigned arena_ind + + + An extent merge function conforms to the + extent_merge_t type and optionally merges adjacent extents, + at given addr_a and size_a + with given addr_b and + size_b into one contiguous extent, operating on + committed/decommitted memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates that the extents + remain distinct mappings and therefore should continue to be operated on + independently. + + + + + + arenas.narenas + (unsigned) + r- + + Current limit on number of arenas. + + + + + arenas.dirty_decay_ms + (ssize_t) + rw + + Current default per-arena approximate time in + milliseconds from the creation of a set of unused dirty pages until an + equivalent set of unused dirty pages is purged and/or reused, used to + initialize arena.<i>.dirty_decay_ms + during arena creation. 
See opt.dirty_decay_ms + for additional information. + + + + + arenas.muzzy_decay_ms + (ssize_t) + rw + + Current default per-arena approximate time in + milliseconds from the creation of a set of unused muzzy pages until an + equivalent set of unused muzzy pages is purged and/or reused, used to + initialize arena.<i>.muzzy_decay_ms + during arena creation. See opt.muzzy_decay_ms + for additional information. + + + + + arenas.quantum + (size_t) + r- + + Quantum size. + + + + + arenas.page + (size_t) + r- + + Page size. + + + + + arenas.tcache_max + (size_t) + r- + + Maximum thread-cached size class. + + + + + arenas.nbins + (unsigned) + r- + + Number of bin size classes. + + + + + arenas.nhbins + (unsigned) + r- + + Total number of thread cache bin size + classes. + + + + + arenas.bin.<i>.size + (size_t) + r- + + Maximum size supported by size class. + + + + + arenas.bin.<i>.nregs + (uint32_t) + r- + + Number of regions per slab. + + + + + arenas.bin.<i>.slab_size + (size_t) + r- + + Number of bytes per slab. + + + + + arenas.nlextents + (unsigned) + r- + + Total number of large size classes. + + + + + arenas.lextent.<i>.size + (size_t) + r- + + Maximum size supported by this large size + class. + + + + + arenas.create + (unsigned, extent_hooks_t *) + rw + + Explicitly create a new arena outside the range of + automatically managed arenas, with optionally specified extent hooks, + and return the new arena index. + + + + + arenas.lookup + (unsigned, void*) + rw + + Index of the arena to which an allocation belongs to. + + + + + prof.thread_active_init + (bool) + rw + [] + + Control the initial setting for thread.prof.active + in newly created threads. See the opt.prof_thread_active_init + option for additional information. + + + + + prof.active + (bool) + rw + [] + + Control whether sampling is currently active. See the + opt.prof_active + option for additional information, as well as the interrelated thread.prof.active + mallctl. + + + + + prof.dump + (const char *) + -w + [] + + Dump a memory profile to the specified file, or if NULL + is specified, to a file according to the pattern + <prefix>.<pid>.<seq>.m<mseq>.heap, + where <prefix> is controlled by the + opt.prof_prefix + option. + + + + + prof.gdump + (bool) + rw + [] + + When enabled, trigger a memory profile dump every time + the total virtual memory exceeds the previous maximum. Profiles are + dumped to files named according to the pattern + <prefix>.<pid>.<seq>.u<useq>.heap, + where <prefix> is controlled by the opt.prof_prefix + option. + + + + + prof.reset + (size_t) + -w + [] + + Reset all memory profile statistics, and optionally + update the sample rate (see opt.lg_prof_sample + and prof.lg_sample). + + + + + + prof.lg_sample + (size_t) + r- + [] + + Get the current sample rate (see opt.lg_prof_sample). + + + + + + prof.interval + (uint64_t) + r- + [] + + Average number of bytes allocated between + interval-based profile dumps. See the + opt.lg_prof_interval + option for additional information. + + + + + stats.allocated + (size_t) + r- + [] + + Total number of bytes allocated by the + application. + + + + + stats.active + (size_t) + r- + [] + + Total number of bytes in active pages allocated by the + application. This is a multiple of the page size, and greater than or + equal to stats.allocated. + This does not include + stats.arenas.<i>.pdirty, + + stats.arenas.<i>.pmuzzy, nor pages + entirely devoted to allocator metadata. 
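As a usage note for the statistics entries above, here is a minimal sketch (assumed example, requiring a build with statistics support; see config.stats) that first writes to the epoch mallctl to refresh the cached counters, as recommended earlier in this section, and then reads stats.allocated and stats.active.

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void print_mem_stats(void) {
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t allocated, active, sz;

	/* Writing any value to epoch refreshes the data the stats report. */
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
		return;
	sz = sizeof(active);
	if (mallctl("stats.active", &active, &sz, NULL, 0) != 0)
		return;
	printf("allocated: %zu bytes, active: %zu bytes\n", allocated, active);
}

int main(void) {
	void *p = malloc(1024);
	print_mem_stats();
	free(p);
	return 0;
}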
+ + + + + stats.metadata + (size_t) + r- + [] + + Total number of bytes dedicated to metadata, which + comprise base allocations used for bootstrap-sensitive allocator + metadata structures (see stats.arenas.<i>.base) + and internal allocations (see stats.arenas.<i>.internal). + Transparent huge page (enabled with opt.metadata_thp) usage is not + considered. + + + + + stats.metadata_thp + (size_t) + r- + [] + + Number of transparent huge pages (THP) used for + metadata. See stats.metadata and + opt.metadata_thp) for + details. + + + + + stats.resident + (size_t) + r- + [] + + Maximum number of bytes in physically resident data + pages mapped by the allocator, comprising all pages dedicated to + allocator metadata, pages backing active allocations, and unused dirty + pages. This is a maximum rather than precise because pages may not + actually be physically resident if they correspond to demand-zeroed + virtual memory that has not yet been touched. This is a multiple of the + page size, and is larger than stats.active. + + + + + stats.mapped + (size_t) + r- + [] + + Total number of bytes in active extents mapped by the + allocator. This is larger than stats.active. This + does not include inactive extents, even those that contain unused dirty + pages, which means that there is no strict ordering between this and + stats.resident. + + + + + stats.retained + (size_t) + r- + [] + + Total number of bytes in virtual memory mappings that + were retained rather than being returned to the operating system via + e.g. munmap + 2 or similar. Retained virtual + memory is typically untouched, decommitted, or purged, so it has no + strongly associated physical memory (see extent hooks for details). + Retained memory is excluded from mapped memory statistics, e.g. stats.mapped. + + + + + + stats.background_thread.num_threads + (size_t) + r- + [] + + Number of background + threads running currently. + + + + + stats.background_thread.num_runs + (uint64_t) + r- + [] + + Total number of runs from all background threads. + + + + + stats.background_thread.run_interval + (uint64_t) + r- + [] + + Average run interval in nanoseconds of background threads. + + + + + stats.mutexes.ctl.{counter}; + (counter specific type) + r- + [] + + Statistics on ctl mutex (global + scope; mallctl related). {counter} is one of the + counters below: + + num_ops (uint64_t): + Total number of lock acquisition operations on this mutex. + + num_spin_acq (uint64_t): Number + of times the mutex was spin-acquired. When the mutex is currently + locked and cannot be acquired immediately, a short period of + spin-retry within jemalloc will be performed. Acquired through spin + generally means the contention was lightweight and not causing context + switches. + + num_wait (uint64_t): Number of + times the mutex was wait-acquired, which means the mutex contention + was not solved by spin-retry, and blocking operation was likely + involved in order to acquire the mutex. This event generally implies + higher cost / longer delay, and should be investigated if it happens + often. + + max_wait_time (uint64_t): + Maximum length of time in nanoseconds spent on a single wait-acquired + lock operation. Note that to avoid profiling overhead on the common + path, this does not consider spin-acquired cases. + + total_wait_time (uint64_t): + Cumulative time in nanoseconds spent on wait-acquired lock operations. + Similarly, spin-acquired cases are not considered. + + max_num_thds (uint32_t): Maximum + number of threads waiting on this mutex simultaneously. 
Similarly, + spin-acquired cases are not considered. + + num_owner_switch (uint64_t): + Number of times the current mutex owner is different from the previous + one. This event does not generally imply an issue; rather it is an + indicator of how often the protected data are accessed by different + threads. + + + + + + + + + stats.mutexes.background_thread.{counter} + (counter specific type) r- + [] + + Statistics on background_thread mutex + (global scope; background_thread + related). {counter} is one of the counters in mutex profiling + counters. + + + + + stats.mutexes.prof.{counter} + (counter specific type) r- + [] + + Statistics on prof mutex (global + scope; profiling related). {counter} is one of the + counters in mutex profiling + counters. + + + + + stats.mutexes.reset + (void) -- + [] + + Reset all mutex profile statistics, including global + mutexes, arena mutexes and bin mutexes. + + + + + stats.arenas.<i>.dss + (const char *) + r- + + dss (sbrk + 2) allocation precedence as + related to mmap + 2 allocation. See opt.dss for details. + + + + + + stats.arenas.<i>.dirty_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused dirty pages until an equivalent set of unused dirty pages + is purged and/or reused. See opt.dirty_decay_ms + for details. + + + + + stats.arenas.<i>.muzzy_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused muzzy pages until an equivalent set of unused muzzy pages + is purged and/or reused. See opt.muzzy_decay_ms + for details. + + + + + stats.arenas.<i>.nthreads + (unsigned) + r- + + Number of threads currently assigned to + arena. + + + + + stats.arenas.<i>.uptime + (uint64_t) + r- + + Time elapsed (in nanoseconds) since the arena was + created. If <i> equals 0 or + MALLCTL_ARENAS_ALL, this is the uptime since malloc + initialization. + + + + + stats.arenas.<i>.pactive + (size_t) + r- + + Number of pages in active extents. + + + + + stats.arenas.<i>.pdirty + (size_t) + r- + + Number of pages within unused extents that are + potentially dirty, and for which madvise() or + similar has not been called. See opt.dirty_decay_ms + for a description of dirty pages. + + + + + stats.arenas.<i>.pmuzzy + (size_t) + r- + + Number of pages within unused extents that are muzzy. + See opt.muzzy_decay_ms + for a description of muzzy pages. + + + + + stats.arenas.<i>.mapped + (size_t) + r- + [] + + Number of mapped bytes. + + + + + stats.arenas.<i>.retained + (size_t) + r- + [] + + Number of retained bytes. See stats.retained for + details. + + + + + stats.arenas.<i>.extent_avail + (size_t) + r- + [] + + Number of allocated (but unused) extent structs in this + arena. + + + + + stats.arenas.<i>.base + (size_t) + r- + [] + + + Number of bytes dedicated to bootstrap-sensitive allocator metadata + structures. + + + + + stats.arenas.<i>.internal + (size_t) + r- + [] + + Number of bytes dedicated to internal allocations. + Internal allocations differ from application-originated allocations in + that they are for internal use, and that they are omitted from heap + profiles. + + + + + stats.arenas.<i>.metadata_thp + (size_t) + r- + [] + + Number of transparent huge pages (THP) used for + metadata. See opt.metadata_thp + for details. + + + + + stats.arenas.<i>.resident + (size_t) + r- + [] + + Maximum number of bytes in physically resident data + pages mapped by the arena, comprising all pages dedicated to allocator + metadata, pages backing active allocations, and unused dirty pages. 
+ This is a maximum rather than precise because pages may not actually be + physically resident if they correspond to demand-zeroed virtual memory + that has not yet been touched. This is a multiple of the page + size. + + + + + stats.arenas.<i>.dirty_npurge + (uint64_t) + r- + [] + + Number of dirty page purge sweeps performed. + + + + + + stats.arenas.<i>.dirty_nmadvise + (uint64_t) + r- + [] + + Number of madvise() or similar + calls made to purge dirty pages. + + + + + stats.arenas.<i>.dirty_purged + (uint64_t) + r- + [] + + Number of dirty pages purged. + + + + + stats.arenas.<i>.muzzy_npurge + (uint64_t) + r- + [] + + Number of muzzy page purge sweeps performed. + + + + + + stats.arenas.<i>.muzzy_nmadvise + (uint64_t) + r- + [] + + Number of madvise() or similar + calls made to purge muzzy pages. + + + + + stats.arenas.<i>.muzzy_purged + (uint64_t) + r- + [] + + Number of muzzy pages purged. + + + + + stats.arenas.<i>.small.allocated + (size_t) + r- + [] + + Number of bytes currently allocated by small objects. + + + + + + stats.arenas.<i>.small.nmalloc + (uint64_t) + r- + [] + + Cumulative number of times a small allocation was + requested from the arena's bins, whether to fill the relevant tcache if + opt.tcache is + enabled, or to directly satisfy an allocation request + otherwise. + + + + + stats.arenas.<i>.small.ndalloc + (uint64_t) + r- + [] + + Cumulative number of times a small allocation was + returned to the arena's bins, whether to flush the relevant tcache if + opt.tcache is + enabled, or to directly deallocate an allocation + otherwise. + + + + + stats.arenas.<i>.small.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests satisfied by + all bin size classes. + + + + + stats.arenas.<i>.small.nfills + (uint64_t) + r- + [] + + Cumulative number of tcache fills by all small size + classes. + + + + + stats.arenas.<i>.small.nflushes + (uint64_t) + r- + [] + + Cumulative number of tcache flushes by all small size + classes. + + + + + stats.arenas.<i>.large.allocated + (size_t) + r- + [] + + Number of bytes currently allocated by large objects. + + + + + + stats.arenas.<i>.large.nmalloc + (uint64_t) + r- + [] + + Cumulative number of times a large extent was allocated + from the arena, whether to fill the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly satisfy + an allocation request otherwise. + + + + + stats.arenas.<i>.large.ndalloc + (uint64_t) + r- + [] + + Cumulative number of times a large extent was returned + to the arena, whether to flush the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly + deallocate an allocation otherwise. + + + + + stats.arenas.<i>.large.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests satisfied by + all large size classes. + + + + + stats.arenas.<i>.large.nfills + (uint64_t) + r- + [] + + Cumulative number of tcache fills by all large size + classes. + + + + + stats.arenas.<i>.large.nflushes + (uint64_t) + r- + [] + + Cumulative number of tcache flushes by all large size + classes. + + + + + stats.arenas.<i>.bins.<j>.nmalloc + (uint64_t) + r- + [] + + Cumulative number of times a bin region of the + corresponding size class was allocated from the arena, whether to fill + the relevant tcache if opt.tcache is enabled, or + to directly satisfy an allocation request otherwise. 
+ + + + + stats.arenas.<i>.bins.<j>.ndalloc + (uint64_t) + r- + [] + + Cumulative number of times a bin region of the + corresponding size class was returned to the arena, whether to flush the + relevant tcache if opt.tcache is enabled, or + to directly deallocate an allocation otherwise. + + + + + stats.arenas.<i>.bins.<j>.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests satisfied by + bin regions of the corresponding size class. + + + + + stats.arenas.<i>.bins.<j>.curregs + (size_t) + r- + [] + + Current number of regions for this size + class. + + + + + stats.arenas.<i>.bins.<j>.nfills + (uint64_t) + r- + + Cumulative number of tcache fills. + + + + + stats.arenas.<i>.bins.<j>.nflushes + (uint64_t) + r- + + Cumulative number of tcache flushes. + + + + + stats.arenas.<i>.bins.<j>.nslabs + (uint64_t) + r- + [] + + Cumulative number of slabs created. + + + + + stats.arenas.<i>.bins.<j>.nreslabs + (uint64_t) + r- + [] + + Cumulative number of times the current slab from which + to allocate changed. + + + + + stats.arenas.<i>.bins.<j>.curslabs + (size_t) + r- + [] + + Current number of slabs. + + + + + + stats.arenas.<i>.bins.<j>.nonfull_slabs + (size_t) + r- + [] + + Current number of nonfull slabs. + + + + + stats.arenas.<i>.bins.<j>.mutex.{counter} + (counter specific type) r- + [] + + Statistics on + arena.<i>.bins.<j> mutex (arena bin + scope; bin operation related). {counter} is one of + the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.extents.<j>.n{extent_type} + (size_t) + r- + [] + + Number of extents of the given type in this arena in + the bucket corresponding to page size index <j>. The extent type + is one of dirty, muzzy, or retained. + + + + + stats.arenas.<i>.extents.<j>.{extent_type}_bytes + (size_t) + r- + [] + + Sum of the bytes managed by extents of the given type + in this arena in the bucket corresponding to page size index <j>. + The extent type is one of dirty, muzzy, or retained. + + + + + stats.arenas.<i>.lextents.<j>.nmalloc + (uint64_t) + r- + [] + + Cumulative number of times a large extent of the + corresponding size class was allocated from the arena, whether to fill + the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly satisfy + an allocation request otherwise. + + + + + stats.arenas.<i>.lextents.<j>.ndalloc + (uint64_t) + r- + [] + + Cumulative number of times a large extent of the + corresponding size class was returned to the arena, whether to flush the + relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly + deallocate an allocation otherwise. + + + + + stats.arenas.<i>.lextents.<j>.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests satisfied by + large extents of the corresponding size class. + + + + + stats.arenas.<i>.lextents.<j>.curlextents + (size_t) + r- + [] + + Current number of large allocations for this size class. + + + + + + stats.arenas.<i>.mutexes.large.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.large + mutex (arena scope; large allocation related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.extent_avail.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.extent_avail + mutex (arena scope; extent avail related). + {counter} is one of the counters in mutex profiling + counters. 
+ + + + + stats.arenas.<i>.mutexes.extents_dirty.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.extents_dirty + mutex (arena scope; dirty extents related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.extents_muzzy.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.extents_muzzy + mutex (arena scope; muzzy extents related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.extents_retained.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.extents_retained + mutex (arena scope; retained extents related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.decay_dirty.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.decay_dirty + mutex (arena scope; decay for dirty pages related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.decay_muzzy.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.decay_muzzy + mutex (arena scope; decay for muzzy pages related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.base.{counter} + (counter specific type) r- + [] + + Statistics on arena.<i>.base + mutex (arena scope; base allocator related). + {counter} is one of the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.mutexes.tcache_list.{counter} + (counter specific type) r- + [] + + Statistics on + arena.<i>.tcache_list mutex (arena scope; + tcache to arena association related). This mutex is expected to be + accessed less often. {counter} is one of the + counters in mutex profiling + counters. + + + + + + HEAP PROFILE FORMAT + Although the heap profiling functionality was originally designed to + be compatible with the + pprof command that is developed as part of the gperftools + package, the addition of per thread heap profiling functionality + required a different heap profile format. The jeprof + command is derived from pprof, with enhancements to + support the heap profile format described here. + + In the following hypothetical heap profile, [...] + indicates elision for the sake of compactness. The following matches the above heap profile, but most +tokens are replaced with <description> to indicate +descriptions of the corresponding fields. / + : : [: ] + [...] + : : [: ] + [...] + : : [: ] + [...] +@ [...] [...] + : : [: ] + : : [: ] + : : [: ] +[...] + +MAPPED_LIBRARIES: +/maps>]]> + + + + DEBUGGING MALLOC PROBLEMS + When debugging, it is a good idea to configure/build jemalloc with + the and + options, and recompile the program with suitable options and symbols for + debugger support. When so configured, jemalloc incorporates a wide variety + of run-time assertions that catch application errors such as double-free, + write-after-free, etc. + + Programs often accidentally depend on uninitialized + memory actually being filled with zero bytes. Junk filling + (see the opt.junk + option) tends to expose such bugs in the form of obviously incorrect + results and/or coredumps. Conversely, zero + filling (see the opt.zero option) eliminates + the symptoms of such bugs. Between these two options, it is usually + possible to quickly detect, diagnose, and eliminate such bugs. 
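As a brief editorial sketch (not part of the upstream manual or sources), the snippet below shows one way an application might opt into junk filling while debugging. jemalloc consults the global malloc_conf string, the /etc/malloc.conf symbolic link, and the MALLOC_CONF environment variable when it initializes, so the option string takes effect before main() runs; the particular option string and printed output here are illustrative only.

/*
 * Hedged example: enable junk filling (and fatal warnings) for a debug run,
 * assuming jemalloc is the linked allocator. The global malloc_conf string
 * is read by jemalloc during its initialization.
 */
#include <stdio.h>
#include <stdlib.h>

/* Read by jemalloc before main() runs; option names per opt.junk / opt.abort. */
const char *malloc_conf = "junk:true,abort:true";

int main(void) {
    unsigned char *p = malloc(64);
    if (p == NULL) {
        return 1;
    }
    /*
     * With junk filling enabled, newly allocated memory is filled with a
     * nonzero pattern (0xa5), so code that silently assumes malloc() returns
     * zeroed memory tends to fail immediately rather than much later.
     */
    printf("first byte after malloc: 0x%02x\n", p[0]);
    free(p);
    /* Freed memory is filled with 0x5a, which helps expose use-after-free. */
    return 0;
}

The same options can be supplied without recompiling by setting MALLOC_CONF="junk:true,abort:true" in the environment before starting the program.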
+ + This implementation does not provide much detail about the problems + it detects, because the performance impact for storing such information + would be prohibitive. + + + DIAGNOSTIC MESSAGES + If any of the memory allocation/deallocation functions detect an + error or warning condition, a message will be printed to file descriptor + STDERR_FILENO. Errors will result in the process + dumping core. If the opt.abort option is set, most + warnings are treated as errors. + + The malloc_message variable allows the programmer + to override the function which emits the text strings forming the errors + and warnings if for some reason the STDERR_FILENO file + descriptor is not suitable for this. + malloc_message() takes the + cbopaque pointer argument that is + NULL unless overridden by the arguments in a call to + malloc_stats_print(), followed by a string + pointer. Please note that doing anything which tries to allocate memory in + this function is likely to result in a crash or deadlock. + + All messages are prefixed by + <jemalloc>: . + + + RETURN VALUES + + Standard API + The malloc() and + calloc() functions return a pointer to the + allocated memory if successful; otherwise a NULL + pointer is returned and errno is set to + ENOMEM. + + The posix_memalign() function + returns the value 0 if successful; otherwise it returns an error value. + The posix_memalign() function will fail + if: + + + EINVAL + + The alignment parameter is + not a power of 2 at least as large as + sizeof(void *). + + + + ENOMEM + + Memory allocation error. + + + + + The aligned_alloc() function returns + a pointer to the allocated memory if successful; otherwise a + NULL pointer is returned and + errno is set. The + aligned_alloc() function will fail if: + + + EINVAL + + The alignment parameter is + not a power of 2. + + + + ENOMEM + + Memory allocation error. + + + + + The realloc() function returns a + pointer, possibly identical to ptr, to the + allocated memory if successful; otherwise a NULL + pointer is returned, and errno is set to + ENOMEM if the error was the result of an + allocation failure. The realloc() + function always leaves the original buffer intact when an error occurs. + + + The free() function returns no + value. + + + Non-standard API + The mallocx() and + rallocx() functions return a pointer to + the allocated memory if successful; otherwise a NULL + pointer is returned to indicate insufficient contiguous memory was + available to service the allocation request. + + The xallocx() function returns the + real size of the resulting resized allocation pointed to by + ptr, which is a value less than + size if the allocation could not be adequately + grown in place. + + The sallocx() function returns the + real size of the allocation pointed to by ptr. + + + The nallocx() returns the real size + that would result from a successful equivalent + mallocx() function call, or zero if + insufficient memory is available to perform the size computation. + + The mallctl(), + mallctlnametomib(), and + mallctlbymib() functions return 0 on + success; otherwise they return an error value. The functions will fail + if: + + + EINVAL + + newp is not + NULL, and newlen is too + large or too small. Alternatively, *oldlenp + is too large or too small; in this case as much data as possible + are read despite the error. + + + ENOENT + + name or + mib specifies an unknown/invalid + value. + + + EPERM + + Attempt to read or write void value, or attempt to + write read-only value. 
+ + + EAGAIN + + A memory allocation failure + occurred. + + + EFAULT + + An interface with side effects failed in some way + not directly related to mallctl*() + read/write processing. + + + + + The malloc_usable_size() function + returns the usable size of the allocation pointed to by + ptr. + + + + ENVIRONMENT + The following environment variable affects the execution of the + allocation functions: + + + MALLOC_CONF + + If the environment variable + MALLOC_CONF is set, the characters it contains + will be interpreted as options. + + + + + + EXAMPLES + To dump core whenever a problem occurs: + ln -s 'abort:true' /etc/malloc.conf + + To specify in the source that only one arena should be automatically + created: + + + + SEE ALSO + madvise + 2, + mmap + 2, + sbrk + 2, + utrace + 2, + alloca + 3, + atexit + 3, + getpagesize + 3 + + + STANDARDS + The malloc(), + calloc(), + realloc(), and + free() functions conform to ISO/IEC + 9899:1990 (ISO C90). + + The posix_memalign() function conforms + to IEEE Std 1003.1-2001 (POSIX.1). + + diff --git a/deps/jemalloc/doc/manpages.xsl.in b/deps/jemalloc/doc/manpages.xsl.in new file mode 100644 index 0000000..88b2626 --- /dev/null +++ b/deps/jemalloc/doc/manpages.xsl.in @@ -0,0 +1,4 @@ + + + + diff --git a/deps/jemalloc/doc/stylesheet.xsl b/deps/jemalloc/doc/stylesheet.xsl new file mode 100644 index 0000000..619365d --- /dev/null +++ b/deps/jemalloc/doc/stylesheet.xsl @@ -0,0 +1,10 @@ + + ansi + + + + + + + + diff --git a/deps/jemalloc/include/jemalloc/internal/arena_externs.h b/deps/jemalloc/include/jemalloc/internal/arena_externs.h new file mode 100644 index 0000000..a4523ae --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_externs.h @@ -0,0 +1,104 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H +#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/hook.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/stats.h" + +extern ssize_t opt_dirty_decay_ms; +extern ssize_t opt_muzzy_decay_ms; + +extern percpu_arena_mode_t opt_percpu_arena; +extern const char *percpu_arena_mode_names[]; + +extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; +extern malloc_mutex_t arenas_lock; + +extern size_t opt_oversize_threshold; +extern size_t oversize_threshold; + +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, + ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + bin_stats_t *bstats, arena_stats_large_t *lstats, + arena_stats_extents_t *estats); +void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +#ifdef JEMALLOC_JET +size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); +#endif +extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, + size_t usize, size_t alignment, bool *zero); +void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, + extent_t *extent); +void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +ssize_t arena_dirty_decay_ms_get(arena_t *arena); +bool 
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_get(arena_t *arena); +bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_destroy(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, + bool zero); + +typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *); +extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; + +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); +void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize); +void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, extent_t *extent, void *ptr); +void arena_dalloc_small(tsdn_t *tsdn, void *ptr); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero, size_t *newsize); +void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache, + hook_ralloc_args_t *hook_args); +dss_prec_t arena_dss_prec_get(arena_t *arena); +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_dirty_decay_ms_default_get(void); +bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_default_get(void); +bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); +bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, + size_t *old_limit, size_t *new_limit); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +bool arena_init_huge(void); +bool arena_is_huge(unsigned arena_ind); +arena_t *arena_choose_huge(tsd_t *tsd); +bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, + unsigned *binshard); +void arena_boot(sc_data_t *sc_data); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_prefork4(tsdn_t *tsdn, arena_t *arena); +void arena_prefork5(tsdn_t *tsdn, arena_t *arena); +void arena_prefork6(tsdn_t *tsdn, arena_t *arena); +void arena_prefork7(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); + +#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h new file mode 100644 index 0000000..9abf7f6 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H + +static inline unsigned 
+arena_ind_get(const arena_t *arena) { + return base_ind_get(arena->base); +} + +static inline void +arena_internal_add(arena_t *arena, size_t size) { + atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline void +arena_internal_sub(arena_t *arena, size_t size) { + atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline size_t +arena_internal_get(arena_t *arena) { + return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); +} + +static inline bool +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { + cassert(config_prof); + + if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { + return false; + } + + return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); +} + +static inline void +percpu_arena_update(tsd_t *tsd, unsigned cpu) { + assert(have_percpu_arena); + arena_t *oldarena = tsd_arena_get(tsd); + assert(oldarena != NULL); + unsigned oldind = arena_ind_get(oldarena); + + if (oldind != cpu) { + unsigned newind = cpu; + arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); + assert(newarena != NULL); + + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + tcache_t *tcache = tcache_get(tsd); + if (tcache != NULL) { + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + newarena); + } + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h new file mode 100644 index 0000000..dd92657 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h @@ -0,0 +1,427 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" + +JEMALLOC_ALWAYS_INLINE bool +arena_has_default_hooks(arena_t *arena) { + return (extent_hooks_get(arena) == &extent_hooks_default); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) { + if (arena != NULL) { + return arena; + } + + /* + * For huge allocations, use the dedicated huge arena if both are true: + * 1) is using auto arena selection (i.e. arena == NULL), and 2) the + * thread is not assigned to a manual arena. + */ + if (unlikely(size >= oversize_threshold)) { + arena_t *tsd_arena = tsd_arena_get(tsd); + if (tsd_arena == NULL || arena_is_auto(tsd_arena)) { + return arena_choose_huge(tsd); + } + } + + return arena_choose(tsd, NULL); +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. */ + if (alloc_ctx == NULL) { + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + return large_prof_tctx_get(tsdn, extent); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); + } + } + return (prof_tctx_t *)(uintptr_t)1U; +} + +JEMALLOC_ALWAYS_INLINE void +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. 
*/ + if (alloc_ctx == NULL) { + extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + large_prof_tctx_set(tsdn, extent, tctx); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); + } + } +} + +static inline void +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + extent_t *extent = iealloc(tsdn, ptr); + assert(!extent_slab_get(extent)); + + large_prof_tctx_reset(tsdn, extent); +} + +JEMALLOC_ALWAYS_INLINE nstime_t +arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, + alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + extent_t *extent = iealloc(tsdn, ptr); + /* + * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're + * sure we have a sampled allocation. + */ + assert(!extent_slab_get(extent)); + return large_prof_alloc_time_get(extent); +} + +JEMALLOC_ALWAYS_INLINE void +arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, + nstime_t t) { + cassert(config_prof); + assert(ptr != NULL); + + extent_t *extent = iealloc(tsdn, ptr); + assert(!extent_slab_get(extent)); + large_prof_alloc_time_set(extent, t); +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { + tsd_t *tsd; + ticker_t *decay_ticker; + + if (unlikely(tsdn_null(tsdn))) { + return; + } + tsd = tsdn_tsd(tsdn); + decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); + if (unlikely(decay_ticker == NULL)) { + return; + } + if (unlikely(ticker_ticks(decay_ticker, nticks))) { + arena_decay(tsdn, arena, false, false); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); + + arena_decay_ticks(tsdn, arena, 1); +} + +/* Purge a single extent to retained / unmapped directly. */ +JEMALLOC_ALWAYS_INLINE void +arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks, + extent_t *extent) { + size_t extent_size = extent_size_get(extent); + extent_dalloc_wrapper(tsdn, arena, + r_extent_hooks, extent); + if (config_stats) { + /* Update stats accordingly. */ + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, + &arena->decay_dirty.stats->nmadvise, 1); + arena_stats_add_u64(tsdn, &arena->stats, + &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + extent_size); + arena_stats_unlock(tsdn, &arena->stats); + } +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + + if (likely(tcache != NULL)) { + if (likely(size <= SC_SMALL_MAXCLASS)) { + return tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + if (likely(size <= tcache_maxclass)) { + return tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + /* (size > tcache_maxclass) case falls through. 
*/ + assert(size > tcache_maxclass); + } + + return arena_malloc_hard(tsdn, arena, size, ind, zero); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(tsdn_t *tsdn, const void *ptr) { + return extent_arena_get(iealloc(tsdn, ptr)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + assert(szind != SC_NSIZES); + + return sz_index2size(szind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_vsalloc(tsdn_t *tsdn, const void *ptr) { + /* + * Return 0 if ptr is not within an extent managed by jemalloc. This + * function has two extra costs relative to isalloc(): + * - The rtree calls cannot claim to be dependent lookups, which induces + * rtree lookup load dependencies. + * - The lookup may fail, so there is an extra branch to check for + * failure. + */ + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } + + if (extent == NULL) { + return 0; + } + assert(extent_state_get(extent) == extent_state_active); + /* Only slab members should be looked up via interior pointers. */ + assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); + + assert(szind != SC_NSIZES); + + return sz_index2size(szind); +} + +static inline void +arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) { + if (config_prof && unlikely(szind < SC_NBINS)) { + arena_dalloc_promoted(tsdn, ptr, NULL, true); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +static inline void +arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind; + bool slab; + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + true, &szind, &slab); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < SC_NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. 
*/ + arena_dalloc_small(tsdn, ptr); + } else { + arena_dalloc_large_no_tcache(tsdn, ptr, szind); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind, + bool slow_path) { + if (szind < nhbins) { + if (config_prof && unlikely(szind < SC_NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + + if (unlikely(tcache == NULL)) { + arena_dalloc_no_tcache(tsdn, ptr); + return; + } + + szind_t szind; + bool slab; + rtree_ctx_t *rtree_ctx; + if (alloc_ctx != NULL) { + szind = alloc_ctx->szind; + slab = alloc_ctx->slab; + assert(szind != SC_NSIZES); + } else { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + } + + if (config_debug) { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < SC_NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); + } +} + +static inline void +arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { + assert(ptr != NULL); + assert(size <= SC_LARGE_MAXCLASS); + + szind_t szind; + bool slab; + if (!config_prof || !opt_prof) { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < SC_NBINS); + } + + if ((config_prof && opt_prof) || config_debug) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + + assert(szind == sz_size2index(size)); + assert((config_prof && opt_prof) || slab == (szind < SC_NBINS)); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + } + + if (likely(slab)) { + /* Small allocation. */ + arena_dalloc_small(tsdn, ptr); + } else { + arena_dalloc_large_no_tcache(tsdn, ptr, szind); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + assert(size <= SC_LARGE_MAXCLASS); + + if (unlikely(tcache == NULL)) { + arena_sdalloc_no_tcache(tsdn, ptr, size); + return; + } + + szind_t szind; + bool slab; + alloc_ctx_t local_ctx; + if (config_prof && opt_prof) { + if (alloc_ctx == NULL) { + /* Uncommon case and should be a static check. 
*/ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &local_ctx.szind, + &local_ctx.slab); + assert(local_ctx.szind == sz_size2index(size)); + alloc_ctx = &local_ctx; + } + slab = alloc_ctx->slab; + szind = alloc_ctx->szind; + } else { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < SC_NBINS); + } + + if (config_debug) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path); + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_stats.h b/deps/jemalloc/include/jemalloc/internal/arena_stats.h new file mode 100644 index 0000000..23949ed --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_stats.h @@ -0,0 +1,271 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H +#define JEMALLOC_INTERNAL_ARENA_STATS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/sc.h" + +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + +/* + * In those architectures that support 64-bit atomics, we use atomic updates for + * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize + * externally. + */ +#ifdef JEMALLOC_ATOMIC_U64 +typedef atomic_u64_t arena_stats_u64_t; +#else +/* Must hold the arena stats mutex while reading atomically. */ +typedef uint64_t arena_stats_u64_t; +#endif + +typedef struct arena_stats_large_s arena_stats_large_t; +struct arena_stats_large_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. + */ + arena_stats_u64_t nmalloc; + arena_stats_u64_t ndalloc; + + /* + * Number of allocation requests that correspond to this size class. + * This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + arena_stats_u64_t nrequests; /* Partially derived. */ + /* + * Number of tcache fills / flushes for large (similarly, periodically + * merged). Note that there is no large tcache batch-fill currently + * (i.e. only fill 1 at a time); however flush may be batched. + */ + arena_stats_u64_t nfills; /* Partially derived. */ + arena_stats_u64_t nflushes; /* Partially derived. */ + + /* Current number of allocations of this size class. */ + size_t curlextents; /* Derived. */ +}; + +typedef struct arena_stats_decay_s arena_stats_decay_t; +struct arena_stats_decay_s { + /* Total number of purge sweeps. */ + arena_stats_u64_t npurge; + /* Total number of madvise calls made. */ + arena_stats_u64_t nmadvise; + /* Total number of pages purged. */ + arena_stats_u64_t purged; +}; + +typedef struct arena_stats_extents_s arena_stats_extents_t; +struct arena_stats_extents_s { + /* + * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t. 
+ * We track both bytes and # of extents: two extents in the same bucket + * may have different sizes if adjacent size classes differ by more than + * a page, so bytes cannot always be derived from # of extents. + */ + atomic_zu_t ndirty; + atomic_zu_t dirty_bytes; + atomic_zu_t nmuzzy; + atomic_zu_t muzzy_bytes; + atomic_zu_t nretained; + atomic_zu_t retained_bytes; +}; + +/* + * Arena stats. Note that fields marked "derived" are not directly maintained + * within the arena code; rather their values are derived during stats merge + * requests. + */ +typedef struct arena_stats_s arena_stats_t; +struct arena_stats_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; +#endif + + /* Number of bytes currently mapped, excluding retained memory. */ + atomic_zu_t mapped; /* Partially derived. */ + + /* + * Number of unused virtual memory bytes currently retained. Retained + * bytes are technically mapped (though always decommitted or purged), + * but they are excluded from the mapped statistic (above). + */ + atomic_zu_t retained; /* Derived. */ + + /* Number of extent_t structs allocated by base, but not being used. */ + atomic_zu_t extent_avail; + + arena_stats_decay_t decay_dirty; + arena_stats_decay_t decay_muzzy; + + atomic_zu_t base; /* Derived. */ + atomic_zu_t internal; + atomic_zu_t resident; /* Derived. */ + atomic_zu_t metadata_thp; + + atomic_zu_t allocated_large; /* Derived. */ + arena_stats_u64_t nmalloc_large; /* Derived. */ + arena_stats_u64_t ndalloc_large; /* Derived. */ + arena_stats_u64_t nfills_large; /* Derived. */ + arena_stats_u64_t nflushes_large; /* Derived. */ + arena_stats_u64_t nrequests_large; /* Derived. */ + + /* VM space had to be leaked (undocumented). Normally 0. */ + atomic_zu_t abandoned_vm; + + /* Number of bytes cached in tcache associated with this arena. */ + atomic_zu_t tcache_bytes; /* Derived. */ + + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; + + /* One element for each large size class. */ + arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; + + /* Arena uptime. */ + nstime_t uptime; +}; + +static inline bool +arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_stats_t); i++) { + assert(((char *)arena_stats)[i] == 0); + } + } +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", + WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { + return true; + } +#endif + /* Memory is zeroed, so there is no need to clear stats. 
*/ + return false; +} + +static inline void +arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_lock(tsdn, &arena_stats->mtx); +#endif +} + +static inline void +arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_unlock(tsdn, &arena_stats->mtx); +#endif +} + +static inline uint64_t +arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + return *p; +#endif +} + +static inline void +arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p, uint64_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + *p += x; +#endif +} + +static inline void +arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p, uint64_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + *p -= x; + assert(*p + x >= *p); +#endif +} + +/* + * Non-atomically sets *dst += src. *dst needs external synchronization. + * This lets us avoid the cost of a fetch_add when its unnecessary (note that + * the types here are atomic). + */ +static inline void +arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); +#else + *dst += src; +#endif +} + +static inline size_t +arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_zu(p, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + return atomic_load_zu(p, ATOMIC_RELAXED); +#endif +} + +static inline void +arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p, size_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); + atomic_store_zu(p, cur + x, ATOMIC_RELAXED); +#endif +} + +static inline void +arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, + atomic_zu_t *p, size_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); + atomic_store_zu(p, cur - x, ATOMIC_RELAXED); +#endif +} + +/* Like the _u64 variant, needs an externally synchronized *dst. 
*/ +static inline void +arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); +} + +static inline void +arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + szind_t szind, uint64_t nrequests) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS]; + arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests); + arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1); + arena_stats_unlock(tsdn, arena_stats); +} + +static inline void +arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); + arena_stats_unlock(tsdn, arena_stats); +} + +#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h b/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h new file mode 100644 index 0000000..46aa77c --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h @@ -0,0 +1,11 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H + +#include "jemalloc/internal/bitmap.h" + +struct arena_slab_data_s { + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h b/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h new file mode 100644 index 0000000..eeab57f --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h @@ -0,0 +1,232 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H + +#include "jemalloc/internal/arena_stats.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/ticker.h" + +struct arena_decay_s { + /* Synchronizes all non-atomic fields. */ + malloc_mutex_t mtx; + /* + * True if a thread is currently purging the extents associated with + * this decay structure. + */ + bool purging; + /* + * Approximate time in milliseconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + atomic_zd_t time_ms; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). 
+ * Epochs always advance by precise multiples of interval, but we + * randomize the deadline to reduce the likelihood of arenas purging in + * lockstep. + */ + nstime_t deadline; + /* + * Number of unpurged pages at beginning of current epoch. During epoch + * advancement we use the delta between arena->decay_*.nunpurged and + * extents_npages_get(&arena->extents_*) to determine how many dirty + * pages, if any, were generated. + */ + size_t nunpurged; + /* + * Trailing log of how many unused dirty pages were generated during + * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last + * element is the most recent epoch. Corresponding epoch times are + * relative to epoch. + */ + size_t backlog[SMOOTHSTEP_NSTEPS]; + + /* + * Pointer to associated stats. These stats are embedded directly in + * the arena's stats due to how stats structures are shared between the + * arena and ctl code. + * + * Synchronization: Same as associated arena's stats field. */ + arena_stats_decay_t *stats; + /* Peak number of pages in associated extents. Used for debug only. */ + uint64_t ceil_npages; +}; + +struct arena_s { + /* + * Number of threads currently assigned to this arena. Each thread has + * two distinct assignments, one for application-serving allocation, and + * the other for internal metadata allocation. Internal metadata must + * not be allocated from arenas explicitly created via the arenas.create + * mallctl, because the arena..reset mallctl indiscriminately + * discards all allocations for the affected arena. + * + * 0: Application allocation. + * 1: Internal metadata allocation. + * + * Synchronization: atomic. + */ + atomic_u_t nthreads[2]; + + /* Next bin shard for binding new threads. Synchronization: atomic. */ + atomic_u_t binshard_next; + + /* + * When percpu_arena is enabled, to amortize the cost of reading / + * updating the current CPU id, track the most recent thread accessing + * this arena, and only read CPU if there is a mismatch. + */ + tsdn_t *last_thd; + + /* Synchronization: internal. */ + arena_stats_t stats; + + /* + * Lists of tcaches and cache_bin_array_descriptors for extant threads + * associated with this arena. Stats from these are merged + * incrementally, and at exit if opt_stats_print is enabled. + * + * Synchronization: tcache_ql_mtx. + */ + ql_head(tcache_t) tcache_ql; + ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql; + malloc_mutex_t tcache_ql_mtx; + + /* Synchronization: internal. */ + prof_accum_t prof_accum; + + /* + * PRNG state for cache index randomization of large allocation base + * pointers. + * + * Synchronization: atomic. + */ + atomic_zu_t offset_state; + + /* + * Extent serial number generator state. + * + * Synchronization: atomic. + */ + atomic_zu_t extent_sn_next; + + /* + * Represents a dss_prec_t, but atomically. + * + * Synchronization: atomic. + */ + atomic_u_t dss_prec; + + /* + * Number of pages in active extents. + * + * Synchronization: atomic. + */ + atomic_zu_t nactive; + + /* + * Extant large allocations. + * + * Synchronization: large_mtx. + */ + extent_list_t large; + /* Synchronizes all large allocation/update/deallocation. */ + malloc_mutex_t large_mtx; + + /* + * Collections of extents that were previously allocated. These are + * used when allocating extents, in an attempt to re-use address space. + * + * Synchronization: internal. 
+ */ + extents_t extents_dirty; + extents_t extents_muzzy; + extents_t extents_retained; + + /* + * Decay-based purging state, responsible for scheduling extent state + * transitions. + * + * Synchronization: internal. + */ + arena_decay_t decay_dirty; /* dirty --> muzzy */ + arena_decay_t decay_muzzy; /* muzzy --> retained */ + + /* + * Next extent size class in a growing series to use when satisfying a + * request via the extent hooks (only if opt_retain). This limits the + * number of disjoint virtual memory ranges so that extent merging can + * be effective even if multiple arenas' extent allocation requests are + * highly interleaved. + * + * retain_grow_limit is the max allowed size ind to expand (unless the + * required size is greater). Default is no limit, and controlled + * through mallctl only. + * + * Synchronization: extent_grow_mtx + */ + pszind_t extent_grow_next; + pszind_t retain_grow_limit; + malloc_mutex_t extent_grow_mtx; + + /* + * Available extent structures that were allocated via + * base_alloc_extent(). + * + * Synchronization: extent_avail_mtx. + */ + extent_tree_t extent_avail; + atomic_zu_t extent_avail_cnt; + malloc_mutex_t extent_avail_mtx; + + /* + * bins is used to store heaps of free regions. + * + * Synchronization: internal. + */ + bins_t bins[SC_NBINS]; + + /* + * Base allocator, from which arena metadata are allocated. + * + * Synchronization: internal. + */ + base_t *base; + /* Used to determine uptime. Read-only after initialization. */ + nstime_t create_time; +}; + +/* Used in conjunction with tsd for fast arena-related context lookup. */ +struct arena_tdata_s { + ticker_t decay_ticker; +}; + +/* Used to pass rtree lookup context down the path. */ +struct alloc_ctx_s { + szind_t szind; + bool slab; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/arena_types.h b/deps/jemalloc/include/jemalloc/internal/arena_types.h new file mode 100644 index 0000000..624937e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/arena_types.h @@ -0,0 +1,51 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H +#define JEMALLOC_INTERNAL_ARENA_TYPES_H + +#include "jemalloc/internal/sc.h" + +/* Maximum number of regions in one slab. */ +#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN) +#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) + +/* Default decay times in milliseconds. */ +#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) +#define MUZZY_DECAY_MS_DEFAULT (0) +/* Number of event ticks between time checks. */ +#define DECAY_NTICKS_PER_UPDATE 1000 + +typedef struct arena_slab_data_s arena_slab_data_t; +typedef struct arena_decay_s arena_decay_t; +typedef struct arena_s arena_t; +typedef struct arena_tdata_s arena_tdata_t; +typedef struct alloc_ctx_s alloc_ctx_t; + +typedef enum { + percpu_arena_mode_names_base = 0, /* Used for options processing. */ + + /* + * *_uninit are used only during bootstrapping, and must correspond + * to initialized variant plus percpu_arena_mode_enabled_base. + */ + percpu_arena_uninit = 0, + per_phycpu_arena_uninit = 1, + + /* All non-disabled modes must come after percpu_arena_disabled. */ + percpu_arena_disabled = 2, + + percpu_arena_mode_names_limit = 3, /* Used for options processing. */ + percpu_arena_mode_enabled_base = 3, + + percpu_arena = 3, + per_phycpu_arena = 4 /* Hyper threads share arena. 
*/ +} percpu_arena_mode_t; + +#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) +#define PERCPU_ARENA_DEFAULT percpu_arena_disabled + +/* + * When allocation_size >= oversize_threshold, use the dedicated huge arena + * (unless have explicitly spicified arena index). 0 disables the feature. + */ +#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20) + +#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/assert.h b/deps/jemalloc/include/jemalloc/internal/assert.h new file mode 100644 index 0000000..be4d45b --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/assert.h @@ -0,0 +1,56 @@ +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +/* + * Define a custom assert() in order to reduce the chances of deadlock during + * assertion failure. + */ +#ifndef assert +#define assert(e) do { \ + if (unlikely(config_debug && !(e))) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef not_reached +#define not_reached() do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + unreachable(); \ +} while (0) +#endif + +#ifndef not_implemented +#define not_implemented() do { \ + if (config_debug) { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef assert_not_implemented +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) +#endif + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#ifndef cassert +#define cassert(c) do { \ + if (unlikely(!(c))) { \ + not_reached(); \ + } \ +} while (0) +#endif diff --git a/deps/jemalloc/include/jemalloc/internal/atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic.h new file mode 100644 index 0000000..a76f54c --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/atomic.h @@ -0,0 +1,86 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_H + +#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE + +#define JEMALLOC_U8_ATOMICS +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) +# include "jemalloc/internal/atomic_gcc_atomic.h" +# if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS) +# undef JEMALLOC_U8_ATOMICS +# endif +#elif defined(JEMALLOC_GCC_SYNC_ATOMICS) +# include "jemalloc/internal/atomic_gcc_sync.h" +# if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS) +# undef JEMALLOC_U8_ATOMICS +# endif +#elif defined(_MSC_VER) +# include "jemalloc/internal/atomic_msvc.h" +#elif defined(JEMALLOC_C11_ATOMICS) +# include "jemalloc/internal/atomic_c11.h" +#else +# error "Don't have atomics implemented on this platform." +#endif + +/* + * This header gives more or less a backport of C11 atomics. The user can write + * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate + * counterparts of the C11 atomic functions for type, as so: + * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); + * and then write things like: + * int *some_ptr; + * atomic_pi_t atomic_ptr_to_int; + * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); + * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); + * assert(some_ptr == prev_value); + * and expect things to work in the obvious way. 
+ * + * Also included (with naming differences to avoid conflicts with the standard + * library): + * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). + * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). + */ + +/* + * Pure convenience, so that we don't have to type "atomic_memory_order_" + * quite so often. + */ +#define ATOMIC_RELAXED atomic_memory_order_relaxed +#define ATOMIC_ACQUIRE atomic_memory_order_acquire +#define ATOMIC_RELEASE atomic_memory_order_release +#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel +#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst + +/* + * Not all platforms have 64-bit atomics. If we do, this #define exposes that + * fact. + */ +#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) +# define JEMALLOC_ATOMIC_U64 +#endif + +JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) + +/* + * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only + * platform that actually needs to know the size, MSVC. + */ +JEMALLOC_GENERATE_ATOMICS(bool, b, 0) + +JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) + +JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) + +JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) + +JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0) + +JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) + +#ifdef JEMALLOC_ATOMIC_U64 +JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) +#endif + +#undef ATOMIC_INLINE + +#endif /* JEMALLOC_INTERNAL_ATOMIC_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/atomic_c11.h b/deps/jemalloc/include/jemalloc/internal/atomic_c11.h new file mode 100644 index 0000000..a5f9313 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/atomic_c11.h @@ -0,0 +1,97 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H +#define JEMALLOC_INTERNAL_ATOMIC_C11_H + +#include + +#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__) + +#define atomic_memory_order_t memory_order +#define atomic_memory_order_relaxed memory_order_relaxed +#define atomic_memory_order_acquire memory_order_acquire +#define atomic_memory_order_release memory_order_release +#define atomic_memory_order_acq_rel memory_order_acq_rel +#define atomic_memory_order_seq_cst memory_order_seq_cst + +#define atomic_fence atomic_thread_fence + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef _Atomic(type) atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + /* \ + * A strict interpretation of the C standard prevents \ + * atomic_load from taking a const argument, but it's \ + * convenient for our purposes. This cast is a workaround. 
\ + */ \ + atomic_##short_type##_t* a_nonconst = \ + (atomic_##short_type##_t*)a; \ + return atomic_load_explicit(a_nonconst, mo); \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + atomic_store_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return atomic_exchange_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_weak_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_strong_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} + +/* + * Integral types have some special operations available that non-integral ones + * lack. + */ +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_add_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_sub_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_and_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_or_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_xor_explicit(a, val, mo); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h new file mode 100644 index 0000000..471515e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h @@ -0,0 +1,129 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H + +#include "jemalloc/internal/assert.h" + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE int +atomic_enum_to_builtin(atomic_memory_order_t mo) { + switch (mo) { + case atomic_memory_order_relaxed: + return __ATOMIC_RELAXED; + case atomic_memory_order_acquire: + return __ATOMIC_ACQUIRE; + case atomic_memory_order_release: + return __ATOMIC_RELEASE; + case atomic_memory_order_acq_rel: + return __ATOMIC_ACQ_REL; + case atomic_memory_order_seq_cst: + return __ATOMIC_SEQ_CST; + } + /* Can't happen; the switch is exhaustive. 
*/ + not_reached(); +} + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + __atomic_thread_fence(atomic_enum_to_builtin(mo)); +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_exchange(&a->repr, &val, &result, \ + atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + true, atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + UNUSED type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + false, \ + atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_add(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_sub(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_and(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_or(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_xor(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h new file mode 100644 index 0000000..e02b7cb --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h @@ -0,0 +1,195 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H + +#define ATOMIC_INIT(...) 
{__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + /* Easy cases first: no barrier, and full barrier. */ + if (mo == atomic_memory_order_relaxed) { + asm volatile("" ::: "memory"); + return; + } + if (mo == atomic_memory_order_seq_cst) { + asm volatile("" ::: "memory"); + __sync_synchronize(); + asm volatile("" ::: "memory"); + return; + } + asm volatile("" ::: "memory"); +# if defined(__i386__) || defined(__x86_64__) + /* This is implicit on x86. */ +# elif defined(__ppc64__) + asm volatile("lwsync"); +# elif defined(__ppc__) + asm volatile("sync"); +# elif defined(__sparc__) && defined(__arch64__) + if (mo == atomic_memory_order_acquire) { + asm volatile("membar #LoadLoad | #LoadStore"); + } else if (mo == atomic_memory_order_release) { + asm volatile("membar #LoadStore | #StoreStore"); + } else { + asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); + } +# else + __sync_synchronize(); +# endif + asm volatile("" ::: "memory"); +} + +/* + * A correct implementation of seq_cst loads and stores on weakly ordered + * architectures could do either of the following: + * 1. store() is weak-fence -> store -> strong fence, load() is load -> + * strong-fence. + * 2. store() is strong-fence -> store, load() is strong-fence -> load -> + * weak-fence. + * The tricky thing is, load() and store() above can be the load or store + * portions of a gcc __sync builtin, so we have to follow GCC's lead, which + * means going with strategy 2. + * On strongly ordered architectures, the natural strategy is to stick a strong + * fence after seq_cst stores, and have naked loads. So we want the strong + * fences in different places on different architectures. + * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to + * accomplish this. 
+ */ + +ATOMIC_INLINE void +atomic_pre_sc_load_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_relaxed); +# else + atomic_fence(atomic_memory_order_seq_cst); +# endif +} + +ATOMIC_INLINE void +atomic_post_sc_store_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_seq_cst); +# else + atomic_fence(atomic_memory_order_relaxed); +# endif + +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type volatile repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_pre_sc_load_fence(); \ + } \ + type result = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_post_sc_store_fence(); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + /* \ + * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ + * an atomic exchange builtin. We fake it with a CAS loop. \ + */ \ + while (true) { \ + type old = a->repr; \ + if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ + return old; \ + } \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, \ + atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_add(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_sub(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_and(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_or(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + 
atomic_memory_order_t mo) { \ + return __sync_fetch_and_xor(&a->repr, val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h b/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h new file mode 100644 index 0000000..67057ce --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h @@ -0,0 +1,158 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H +#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +typedef char atomic_repr_0_t; +typedef short atomic_repr_1_t; +typedef long atomic_repr_2_t; +typedef __int64 atomic_repr_3_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + _ReadWriteBarrier(); +# if defined(_M_ARM) || defined(_M_ARM64) + /* ARM needs a barrier for everything but relaxed. */ + if (mo != atomic_memory_order_relaxed) { + MemoryBarrier(); + } +# elif defined(_M_IX86) || defined (_M_X64) + /* x86 needs a barrier only for seq_cst. */ + if (mo == atomic_memory_order_seq_cst) { + MemoryBarrier(); + } +# else +# error "Don't know how to create atomics for this platform for MSVC." +# endif + _ReadWriteBarrier(); +} + +#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t + +#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) +#define ATOMIC_RAW_CONCAT(a, b) a ## b + +#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ + base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) + +#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ + ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) + +#define ATOMIC_INTERLOCKED_SUFFIX_0 8 +#define ATOMIC_INTERLOCKED_SUFFIX_1 16 +#define ATOMIC_INTERLOCKED_SUFFIX_2 +#define ATOMIC_INTERLOCKED_SUFFIX_3 64 + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ +typedef struct { \ + ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return (type) ret; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_fence(atomic_memory_order_seq_cst); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) e = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ + ATOMIC_INTERLOCKED_REPR(lg_size) d = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ + ATOMIC_INTERLOCKED_REPR(lg_size) old = \ + ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ + lg_size)(&a->repr, d, e); \ + if (old == e) { \ + 
return true; \ + } else { \ + *expected = (type)old; \ + return false; \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + /* We implement the weak version with strong semantics. */ \ + return atomic_compare_exchange_weak_##short_type(a, expected, \ + desired, success_mo, failure_mo); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + /* \ + * MSVC warns on negation of unsigned operands, but for us it \ + * gives exactly the right semantics (MAX_TYPE + 1 - operand). \ + */ \ + __pragma(warning(push)) \ + __pragma(warning(disable: 4146)) \ + return atomic_fetch_add_##short_type(a, -val, mo); \ + __pragma(warning(pop)) \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h b/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h new file mode 100644 index 0000000..0f997e1 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h @@ -0,0 +1,32 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H + +extern bool opt_background_thread; +extern size_t opt_max_background_threads; +extern malloc_mutex_t background_thread_lock; +extern atomic_b_t background_thread_enabled_state; +extern size_t n_background_threads; +extern size_t max_background_threads; +extern background_thread_info_t *background_thread_info; + +bool background_thread_create(tsd_t *tsd, unsigned arena_ind); +bool background_threads_enable(tsd_t *tsd); +bool background_threads_disable(tsd_t *tsd); +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new); +void background_thread_prefork0(tsdn_t *tsdn); +void background_thread_prefork1(tsdn_t *tsdn); +void background_thread_postfork_parent(tsdn_t *tsdn); +void background_thread_postfork_child(tsdn_t *tsdn); +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats); +void background_thread_ctl_init(tsdn_t *tsdn); + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void 
*), void *__restrict); +#endif +bool background_thread_boot0(void); +bool background_thread_boot1(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h new file mode 100644 index 0000000..f85e86f --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h @@ -0,0 +1,62 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H + +JEMALLOC_ALWAYS_INLINE bool +background_thread_enabled(void) { + return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_enabled_set(tsdn_t *tsdn, bool state) { + malloc_mutex_assert_owner(tsdn, &background_thread_lock); + atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE background_thread_info_t * +arena_background_thread_info_get(arena_t *arena) { + unsigned arena_ind = arena_ind_get(arena); + return &background_thread_info[arena_ind % max_background_threads]; +} + +JEMALLOC_ALWAYS_INLINE background_thread_info_t * +background_thread_info_get(size_t ind) { + return &background_thread_info[ind % max_background_threads]; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +background_thread_wakeup_time_get(background_thread_info_t *info) { + uint64_t next_wakeup = nstime_ns(&info->next_wakeup); + assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == + (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); + return next_wakeup; +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t wakeup_time) { + malloc_mutex_assert_owner(tsdn, &info->mtx); + atomic_store_b(&info->indefinite_sleep, + wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); + nstime_init(&info->next_wakeup, wakeup_time); +} + +JEMALLOC_ALWAYS_INLINE bool +background_thread_indefinite_sleep(background_thread_info_t *info) { + return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE void +arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread) { + if (!background_thread_enabled() || is_background_thread) { + return; + } + background_thread_info_t *info = + arena_background_thread_info_get(arena); + if (background_thread_indefinite_sleep(info)) { + background_thread_interval_check(tsdn, arena, + &arena->decay_dirty, 0); + } +} + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h b/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h new file mode 100644 index 0000000..c02aa43 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h @@ -0,0 +1,54 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H + +/* This file really combines "structs" and "types", but only transitionally. 
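/*
 * Illustrative sketch (not from the upstream jemalloc sources):
 * background_thread_enabled_state above is only written while holding
 * background_thread_lock. background_thread_toggle() is a hypothetical
 * helper showing that locking discipline; the real transitions happen in
 * background_threads_enable() and background_threads_disable().
 */
static void
background_thread_toggle(tsdn_t *tsdn, bool enable) {
	malloc_mutex_lock(tsdn, &background_thread_lock);
	background_thread_enabled_set(tsdn, enable);
	malloc_mutex_unlock(tsdn, &background_thread_lock);
}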
*/ + +#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) +# define JEMALLOC_PTHREAD_CREATE_WRAPPER +#endif + +#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX +#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT +#define DEFAULT_NUM_BACKGROUND_THREAD 4 + +typedef enum { + background_thread_stopped, + background_thread_started, + /* Thread waits on the global lock when paused (for arena_reset). */ + background_thread_paused, +} background_thread_state_t; + +struct background_thread_info_s { +#ifdef JEMALLOC_BACKGROUND_THREAD + /* Background thread is pthread specific. */ + pthread_t thread; + pthread_cond_t cond; +#endif + malloc_mutex_t mtx; + background_thread_state_t state; + /* When true, it means no wakeup scheduled. */ + atomic_b_t indefinite_sleep; + /* Next scheduled wakeup time (absolute time in ns). */ + nstime_t next_wakeup; + /* + * Since the last background thread run, newly added number of pages + * that need to be purged by the next wakeup. This is adjusted on + * epoch advance, and is used to determine whether we should signal the + * background thread to wake up earlier. + */ + size_t npages_to_purge_new; + /* Stats: total number of runs since started. */ + uint64_t tot_n_runs; + /* Stats: total sleep time since started. */ + nstime_t tot_sleep_time; +}; +typedef struct background_thread_info_s background_thread_info_t; + +struct background_thread_stats_s { + size_t num_threads; + uint64_t num_runs; + nstime_t run_interval; +}; +typedef struct background_thread_stats_s background_thread_stats_t; + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/base_externs.h b/deps/jemalloc/include/jemalloc/internal/base_externs.h new file mode 100644 index 0000000..7b705c9 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/base_externs.h @@ -0,0 +1,22 @@ +#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H +#define JEMALLOC_INTERNAL_BASE_EXTERNS_H + +extern metadata_thp_mode_t opt_metadata_thp; +extern const char *metadata_thp_mode_names[]; + +base_t *b0get(void); +base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +void base_delete(tsdn_t *tsdn, base_t *base); +extent_hooks_t *base_extent_hooks_get(base_t *base); +extent_hooks_t *base_extent_hooks_set(base_t *base, + extent_hooks_t *extent_hooks); +void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); +extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, + size_t *resident, size_t *mapped, size_t *n_thp); +void base_prefork(tsdn_t *tsdn, base_t *base); +void base_postfork_parent(tsdn_t *tsdn, base_t *base); +void base_postfork_child(tsdn_t *tsdn, base_t *base); +bool base_boot(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/base_inlines.h b/deps/jemalloc/include/jemalloc/internal/base_inlines.h new file mode 100644 index 0000000..aec0e2e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/base_inlines.h @@ -0,0 +1,13 @@ +#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H +#define JEMALLOC_INTERNAL_BASE_INLINES_H + +static inline unsigned +base_ind_get(const base_t *base) { + return base->ind; +} + +static inline bool +metadata_thp_enabled(void) { + return (opt_metadata_thp != metadata_thp_disabled); +} +#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/base_structs.h 
b/deps/jemalloc/include/jemalloc/internal/base_structs.h new file mode 100644 index 0000000..07f214e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/base_structs.h @@ -0,0 +1,59 @@ +#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H +#define JEMALLOC_INTERNAL_BASE_STRUCTS_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sc.h" + +/* Embedded at the beginning of every block of base-managed virtual memory. */ +struct base_block_s { + /* Total size of block's virtual memory mapping. */ + size_t size; + + /* Next block in list of base's blocks. */ + base_block_t *next; + + /* Tracks unused trailing space. */ + extent_t extent; +}; + +struct base_s { + /* Associated arena's index within the arenas array. */ + unsigned ind; + + /* + * User-configurable extent hook functions. Points to an + * extent_hooks_t. + */ + atomic_p_t extent_hooks; + + /* Protects base_alloc() and base_stats_get() operations. */ + malloc_mutex_t mtx; + + /* Using THP when true (metadata_thp auto mode). */ + bool auto_thp_switched; + /* + * Most recent size class in the series of increasingly large base + * extents. Logarithmic spacing between subsequent allocations ensures + * that the total number of distinct mappings remains small. + */ + pszind_t pind_last; + + /* Serial number generation state. */ + size_t extent_sn_next; + + /* Chain of all blocks associated with base. */ + base_block_t *blocks; + + /* Heap of extents that track unused trailing space within blocks. */ + extent_heap_t avail[SC_NSIZES]; + + /* Stats, only maintained if config_stats. */ + size_t allocated; + size_t resident; + size_t mapped; + /* Number of THP regions touched. */ + size_t n_thp; +}; + +#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/base_types.h b/deps/jemalloc/include/jemalloc/internal/base_types.h new file mode 100644 index 0000000..b6db77d --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/base_types.h @@ -0,0 +1,33 @@ +#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H +#define JEMALLOC_INTERNAL_BASE_TYPES_H + +typedef struct base_block_s base_block_t; +typedef struct base_s base_t; + +#define METADATA_THP_DEFAULT metadata_thp_disabled + +/* + * In auto mode, arenas switch to huge pages for the base allocator on the + * second base block. a0 switches to thp on the 5th block (after 20 megabytes + * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. + */ + +#define BASE_AUTO_THP_THRESHOLD 2 +#define BASE_AUTO_THP_THRESHOLD_A0 5 + +typedef enum { + metadata_thp_disabled = 0, + /* + * Lazily enable hugepage for metadata. To avoid high RSS caused by THP + * + low usage arena (i.e. THP becomes a significant percentage), the + * "auto" option only starts using THP after a base allocator used up + * the first THP region. Starting from the second hugepage (in a single + * arena), "auto" behaves the same as "always", i.e. madvise hugepage + * right away. 
+ */ + metadata_thp_auto = 1, + metadata_thp_always = 2, + metadata_thp_mode_limit = 3 +} metadata_thp_mode_t; + +#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bin.h b/deps/jemalloc/include/jemalloc/internal/bin.h new file mode 100644 index 0000000..8547e89 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/bin.h @@ -0,0 +1,123 @@ +#ifndef JEMALLOC_INTERNAL_BIN_H +#define JEMALLOC_INTERNAL_BIN_H + +#include "jemalloc/internal/bin_stats.h" +#include "jemalloc/internal/bin_types.h" +#include "jemalloc/internal/extent_types.h" +#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sc.h" + +/* + * A bin contains a set of extents that are currently being used for slab + * allocations. + */ + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each slab has the following layout: + * + * /--------------------\ + * | region 0 | + * |--------------------| + * | region 1 | + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | region nregs-1 | + * \--------------------/ + */ +typedef struct bin_info_s bin_info_t; +struct bin_info_s { + /* Size of regions in a slab for this bin's size class. */ + size_t reg_size; + + /* Total size of a slab for this bin's size class. */ + size_t slab_size; + + /* Total number of regions in a slab for this bin's size class. */ + uint32_t nregs; + + /* Number of sharded bins in each arena for this size class. */ + uint32_t n_shards; + + /* + * Metadata used to manipulate bitmaps for slabs associated with this + * bin. + */ + bitmap_info_t bitmap_info; +}; + +extern bin_info_t bin_infos[SC_NBINS]; + +typedef struct bin_s bin_t; +struct bin_s { + /* All operations on bin_t fields require lock ownership. */ + malloc_mutex_t lock; + + /* + * Current slab being used to service allocations of this bin's size + * class. slabcur is independent of slabs_{nonfull,full}; whenever + * slabcur is reassigned, the previous slab must be deallocated or + * inserted into slabs_{nonfull,full}. + */ + extent_t *slabcur; + + /* + * Heap of non-full slabs. This heap is used to assure that new + * allocations come from the non-full slab that is oldest/lowest in + * memory. + */ + extent_heap_t slabs_nonfull; + + /* List used to track full slabs. */ + extent_list_t slabs_full; + + /* Bin statistics. */ + bin_stats_t stats; +}; + +/* A set of sharded bins of the same size class. */ +typedef struct bins_s bins_t; +struct bins_s { + /* Sharded bins. Dynamically sized. */ + bin_t *bin_shards; +}; + +void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]); +bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size, + size_t end_size, size_t nshards); +void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]); + +/* Initializes a bin to empty. Returns true on error. */ +bool bin_init(bin_t *bin); + +/* Forking. */ +void bin_prefork(tsdn_t *tsdn, bin_t *bin); +void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin); +void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); + +/* Stats. 
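/*
 * Illustrative sketch (not from the upstream jemalloc sources): bin_infos[]
 * ties the slab layout pictured above together -- a slab of slab_size bytes
 * holds nregs contiguous regions of reg_size bytes each.
 * bin_region_offset() is a hypothetical helper computing where region
 * regind starts within its slab.
 */
static size_t
bin_region_offset(unsigned binind, uint32_t regind) {
	assert(binind < SC_NBINS);
	const bin_info_t *info = &bin_infos[binind];
	assert(regind < info->nregs);
	return (size_t)regind * info->reg_size;
}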
*/ +static inline void +bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { + malloc_mutex_lock(tsdn, &bin->lock); + malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock); + dst_bin_stats->nmalloc += bin->stats.nmalloc; + dst_bin_stats->ndalloc += bin->stats.ndalloc; + dst_bin_stats->nrequests += bin->stats.nrequests; + dst_bin_stats->curregs += bin->stats.curregs; + dst_bin_stats->nfills += bin->stats.nfills; + dst_bin_stats->nflushes += bin->stats.nflushes; + dst_bin_stats->nslabs += bin->stats.nslabs; + dst_bin_stats->reslabs += bin->stats.reslabs; + dst_bin_stats->curslabs += bin->stats.curslabs; + dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs; + malloc_mutex_unlock(tsdn, &bin->lock); +} + +#endif /* JEMALLOC_INTERNAL_BIN_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bin_stats.h b/deps/jemalloc/include/jemalloc/internal/bin_stats.h new file mode 100644 index 0000000..d04519c --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/bin_stats.h @@ -0,0 +1,54 @@ +#ifndef JEMALLOC_INTERNAL_BIN_STATS_H +#define JEMALLOC_INTERNAL_BIN_STATS_H + +#include "jemalloc/internal/mutex_prof.h" + +typedef struct bin_stats_s bin_stats_t; +struct bin_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the bin. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to the size of this + * bin. This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + + /* Number of tcache fills from this bin. */ + uint64_t nfills; + + /* Number of tcache flushes to this bin. */ + uint64_t nflushes; + + /* Total number of slabs created for this bin's size class. */ + uint64_t nslabs; + + /* + * Total number of slabs reused by extracting them from the slabs heap + * for this bin's size class. + */ + uint64_t reslabs; + + /* Current number of slabs in this bin. */ + size_t curslabs; + + /* Current size of nonfull slabs heap in this bin. */ + size_t nonfull_slabs; + + mutex_prof_data_t mutex_data; +}; + +#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bin_types.h b/deps/jemalloc/include/jemalloc/internal/bin_types.h new file mode 100644 index 0000000..3533606 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/bin_types.h @@ -0,0 +1,17 @@ +#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H +#define JEMALLOC_INTERNAL_BIN_TYPES_H + +#include "jemalloc/internal/sc.h" + +#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH) +#define N_BIN_SHARDS_DEFAULT 1 + +/* Used in TSD static initializer only. Real init in arena_bind(). 
*/ +#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}} + +typedef struct tsd_binshards_s tsd_binshards_t; +struct tsd_binshards_s { + uint8_t binshard[SC_NBINS]; +}; + +#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bit_util.h b/deps/jemalloc/include/jemalloc/internal/bit_util.h new file mode 100644 index 0000000..c045eb8 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/bit_util.h @@ -0,0 +1,239 @@ +#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H +#define JEMALLOC_INTERNAL_BIT_UTIL_H + +#include "jemalloc/internal/assert.h" + +#define BIT_UTIL_INLINE static inline + +/* Sanity check. */ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ + || !defined(JEMALLOC_INTERNAL_FFS) +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +#endif + + +BIT_UTIL_INLINE unsigned +ffs_llu(unsigned long long bitmap) { + return JEMALLOC_INTERNAL_FFSLL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_lu(unsigned long bitmap) { + return JEMALLOC_INTERNAL_FFSL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_u(unsigned bitmap) { + return JEMALLOC_INTERNAL_FFS(bitmap); +} + +#ifdef JEMALLOC_INTERNAL_POPCOUNTL +BIT_UTIL_INLINE unsigned +popcount_lu(unsigned long bitmap) { + return JEMALLOC_INTERNAL_POPCOUNTL(bitmap); +} +#endif + +/* + * Clears first unset bit in bitmap, and returns + * place of bit. bitmap *must not* be 0. + */ + +BIT_UTIL_INLINE size_t +cfs_lu(unsigned long* bitmap) { + size_t bit = ffs_lu(*bitmap) - 1; + *bitmap ^= ZU(1) << bit; + return bit; +} + +BIT_UTIL_INLINE unsigned +ffs_zu(size_t bitmap) { +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return ffs_u(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return ffs_lu(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return ffs_llu(bitmap); +#else +#error No implementation for size_t ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u64(uint64_t bitmap) { +#if LG_SIZEOF_LONG == 3 + return ffs_lu(bitmap); +#elif LG_SIZEOF_LONG_LONG == 3 + return ffs_llu(bitmap); +#else +#error No implementation for 64-bit ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u32(uint32_t bitmap) { +#if LG_SIZEOF_INT == 2 + return ffs_u(bitmap); +#else +#error No implementation for 32-bit ffs() +#endif + return ffs_u(bitmap); +} + +BIT_UTIL_INLINE uint64_t +pow2_ceil_u64(uint64_t x) { +#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) + if(unlikely(x <= 1)) { + return x; + } + size_t msb_on_index; +#if (defined(__amd64__) || defined(__x86_64__)) + asm ("bsrq %1, %0" + : "=r"(msb_on_index) // Outputs. + : "r"(x-1) // Inputs. + ); +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) + msb_on_index = (63 ^ __builtin_clzll(x - 1)); +#endif + assert(msb_on_index < 63); + return 1ULL << (msb_on_index + 1); +#else + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + x++; + return x; +#endif +} + +BIT_UTIL_INLINE uint32_t +pow2_ceil_u32(uint32_t x) { +#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__))) + if(unlikely(x <= 1)) { + return x; + } + size_t msb_on_index; +#if (defined(__i386__)) + asm ("bsr %1, %0" + : "=r"(msb_on_index) // Outputs. + : "r"(x-1) // Inputs. 
+ ); +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) + msb_on_index = (31 ^ __builtin_clz(x - 1)); +#endif + assert(msb_on_index < 31); + return 1U << (msb_on_index + 1); +#else + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return x; +#endif +} + +/* Compute the smallest power of 2 that is >= x. */ +BIT_UTIL_INLINE size_t +pow2_ceil_zu(size_t x) { +#if (LG_SIZEOF_PTR == 3) + return pow2_ceil_u64(x); +#else + return pow2_ceil_u32(x); +#endif +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + size_t ret; + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. + ); + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(_MSC_VER)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type size for lg_floor()" +#endif + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); +#else +# error "Unsupported type size for lg_floor()" +#endif +} +#else +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3) + x |= (x >> 32); +#endif + if (x == SIZE_T_MAX) { + return (8 << LG_SIZEOF_PTR) - 1; + } + x++; + return ffs_zu(x) - 2; +} +#endif + +BIT_UTIL_INLINE unsigned +lg_ceil(size_t x) { + return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1); +} + +#undef BIT_UTIL_INLINE + +/* A compile-time version of lg_floor and lg_ceil. */ +#define LG_FLOOR_1(x) 0 +#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1)) +#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2)) +#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4)) +#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8)) +#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16)) +#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32)) +#if LG_SIZEOF_PTR == 2 +# define LG_FLOOR(x) LG_FLOOR_32((x)) +#else +# define LG_FLOOR(x) LG_FLOOR_64((x)) +#endif + +#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1)) + +#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bitmap.h b/deps/jemalloc/include/jemalloc/internal/bitmap.h new file mode 100644 index 0000000..c3f9cb4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/bitmap.h @@ -0,0 +1,369 @@ +#ifndef JEMALLOC_INTERNAL_BITMAP_H +#define JEMALLOC_INTERNAL_BITMAP_H + +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/sc.h" + +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES) +/* Maximum bitmap bit count is determined by maximum regions per slab. 
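/*
 * Illustrative sketch (not from the upstream jemalloc sources): expected
 * behaviour of the bit_util.h helpers defined above, written as assertions.
 * For x = 12, the power-of-two ceiling is 16, lg_floor is 3 and lg_ceil is 4.
 */
static void
bit_util_examples(void) {
	assert(ffs_u(0x28) == 4);        /* 0x28 == 0b101000; ffs is 1-based. */
	assert(pow2_ceil_zu(12) == 16);
	assert(lg_floor(12) == 3);
	assert(lg_ceil(12) == 4);
	assert(lg_ceil(16) == 4);        /* Powers of two are their own ceiling. */
}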
*/ +# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS +#else +/* Maximum bitmap bit count is determined by number of extent size classes. */ +# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES) +#endif +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. + */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define BITMAP_USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. + */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) +#define BITMAP_GROUPS_L4(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) +#define BITMAP_GROUPS_5_LEVEL(nbits) \ + (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef BITMAP_USE_TREE + +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* + * Maximum number of levels possible. 
This could be statically computed based + * on LG_BITMAP_MAXBITS: + * + * #define BITMAP_MAX_LEVELS \ + * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + * + * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so + * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the + * various cascading macros. The only additional cost this incurs is some + * unused trailing entries in bitmap_info_t structures; the bitmaps themselves + * are not impacted. + */ +#define BITMAP_MAX_LEVELS 5 + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* nlevels. */ \ + (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ + (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ + (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ + (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ + /* levels. */ \ + { \ + {0}, \ + {BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ + BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ + BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)} \ + } \ +} + +#else /* BITMAP_USE_TREE */ + +#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* ngroups. */ \ + BITMAP_BITS2GROUPS(nbits) \ +} + +#endif /* BITMAP_USE_TREE */ + +typedef struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +} bitmap_level_t; + +typedef struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + +#ifdef BITMAP_USE_TREE + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). + */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +#else /* BITMAP_USE_TREE */ + /* Number of groups necessary for nbits. */ + size_t ngroups; +#endif /* BITMAP_USE_TREE */ +} bitmap_info_t; + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); +size_t bitmap_size(const bitmap_info_t *binfo); + +static inline bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { +#ifdef BITMAP_USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. 
*/ + return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) { + return false; + } + } + return true; +#endif +} + +static inline bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); +} + +static inline void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(!bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) { + break; + } + } + } +#endif +} + +/* ffu: find first unset >= bit. */ +static inline size_t +bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { + assert(min_bit < binfo->nbits); + +#ifdef BITMAP_USE_TREE + size_t bit = 0; + for (unsigned level = binfo->nlevels; level--;) { + size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + + 1)); + bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit + >> lg_bits_per_group)]; + unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - + bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); + assert(group_nmask <= BITMAP_GROUP_NBITS); + bitmap_t group_mask = ~((1LU << group_nmask) - 1); + bitmap_t group_masked = group & group_mask; + if (group_masked == 0LU) { + if (group == 0LU) { + return binfo->nbits; + } + /* + * min_bit was preceded by one or more unset bits in + * this group, but there are no other unset bits in this + * group. Try again starting at the first bit of the + * next sibling. This will recurse at most once per + * non-root level. + */ + size_t sib_base = bit + (ZU(1) << lg_bits_per_group); + assert(sib_base > min_bit); + assert(sib_base > bit); + if (sib_base >= binfo->nbits) { + return binfo->nbits; + } + return bitmap_ffu(bitmap, binfo, sib_base); + } + bit += ((size_t)(ffs_lu(group_masked) - 1)) << + (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); + } + assert(bit >= min_bit); + assert(bit < binfo->nbits); + return bit; +#else + size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; + bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) + - 1); + size_t bit; + do { + bit = ffs_lu(g); + if (bit != 0) { + return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + } + i++; + g = bitmap[i]; + } while (i < binfo->ngroups); + return binfo->nbits; +#endif +} + +/* sfu: set first unset. 
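/*
 * Illustrative sketch (not from the upstream jemalloc sources): slab-style
 * use of the bitmap primitives defined above, with an arbitrary 256-bit
 * bitmap (real callers size theirs from bin_info_t.bitmap_info).
 */
static void
bitmap_examples(void) {
	bitmap_info_t binfo;
	bitmap_t groups[BITMAP_GROUPS(256)];

	bitmap_info_init(&binfo, 256);
	bitmap_init(groups, &binfo, false);          /* All 256 bits start unset. */
	bitmap_set(groups, &binfo, 3);               /* Mark logical bit 3 as set. */
	assert(bitmap_get(groups, &binfo, 3));
	assert(bitmap_ffu(groups, &binfo, 0) == 0);  /* First unset bit overall. */
	assert(bitmap_ffu(groups, &binfo, 3) == 4);  /* First unset bit >= 3. */
}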
*/ +static inline size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { + size_t bit; + bitmap_t g; + unsigned i; + + assert(!bitmap_full(bitmap, binfo)); + +#ifdef BITMAP_USE_TREE + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = ffs_lu(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); + } +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif + bitmap_set(bitmap, binfo, bit); + return bit; +} + +static inline void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t *gp; + bitmap_t g; + UNUSED bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE + /* Propagate group state transitions up the tree. */ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (!propagate) { + break; + } + } + } +#endif /* BITMAP_USE_TREE */ +} + +#endif /* JEMALLOC_INTERNAL_BITMAP_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/cache_bin.h b/deps/jemalloc/include/jemalloc/internal/cache_bin.h new file mode 100644 index 0000000..d14556a --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/cache_bin.h @@ -0,0 +1,131 @@ +#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H +#define JEMALLOC_INTERNAL_CACHE_BIN_H + +#include "jemalloc/internal/ql.h" + +/* + * The cache_bins are the mechanism that the tcache and the arena use to + * communicate. The tcache fills from and flushes to the arena by passing a + * cache_bin_t to fill/flush. When the arena needs to pull stats from the + * tcaches associated with it, it does so by iterating over its + * cache_bin_array_descriptor_t objects and reading out per-bin stats it + * contains. This makes it so that the arena need not know about the existence + * of the tcache at all. + */ + + +/* + * The count of the number of cached allocations in a bin. We make this signed + * so that negative numbers can encode "invalid" states (e.g. a low water mark + * of -1 for a cache that has been depleted). + */ +typedef int32_t cache_bin_sz_t; + +typedef struct cache_bin_stats_s cache_bin_stats_t; +struct cache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +}; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +typedef struct cache_bin_info_s cache_bin_info_t; +struct cache_bin_info_s { + /* Upper limit on ncached. */ + cache_bin_sz_t ncached_max; +}; + +typedef struct cache_bin_s cache_bin_t; +struct cache_bin_s { + /* Min # cached since last GC. */ + cache_bin_sz_t low_water; + /* # of cached objects. */ + cache_bin_sz_t ncached; + /* + * ncached and stats are both modified frequently. 
Let's keep them + * close so that they have a higher chance of being on the same + * cacheline, thus less write-backs. + */ + cache_bin_stats_t tstats; + /* + * Stack of available objects. + * + * To make use of adjacent cacheline prefetch, the items in the avail + * stack goes to higher address for newer allocations. avail points + * just above the available space, which means that + * avail[-ncached, ... -1] are available items and the lowest item will + * be allocated first. + */ + void **avail; +}; + +typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; +struct cache_bin_array_descriptor_s { + /* + * The arena keeps a list of the cache bins associated with it, for + * stats collection. + */ + ql_elm(cache_bin_array_descriptor_t) link; + /* Pointers to the tcache bins. */ + cache_bin_t *bins_small; + cache_bin_t *bins_large; +}; + +static inline void +cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, + cache_bin_t *bins_small, cache_bin_t *bins_large) { + ql_elm_new(descriptor, link); + descriptor->bins_small = bins_small; + descriptor->bins_large = bins_large; +} + +JEMALLOC_ALWAYS_INLINE void * +cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { + void *ret; + + bin->ncached--; + + /* + * Check for both bin->ncached == 0 and ncached < low_water + * in a single branch. + */ + if (unlikely(bin->ncached <= bin->low_water)) { + bin->low_water = bin->ncached; + if (bin->ncached == -1) { + bin->ncached = 0; + *success = false; + return NULL; + } + } + + /* + * success (instead of ret) should be checked upon the return of this + * function. We avoid checking (ret == NULL) because there is never a + * null stored on the avail stack (which is unknown to the compiler), + * and eagerly checking ret would cause pipeline stall (waiting for the + * cacheline). + */ + *success = true; + ret = *(bin->avail - (bin->ncached + 1)); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE bool +cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) { + if (unlikely(bin->ncached == bin_info->ncached_max)) { + return false; + } + assert(bin->ncached < bin_info->ncached_max); + bin->ncached++; + *(bin->avail - bin->ncached) = ptr; + + return true; +} + +#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h new file mode 100644 index 0000000..7b3850b --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/ckh.h @@ -0,0 +1,101 @@ +#ifndef JEMALLOC_INTERNAL_CKH_H +#define JEMALLOC_INTERNAL_CKH_H + +#include "jemalloc/internal/tsd.h" + +/* Cuckoo hashing implementation. Skip to the end for the interface. */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Hash table cell. 
*/ +typedef struct { + const void *key; + const void *data; +} ckhc_t; + +/* The hash table itself. */ +typedef struct { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. */ + uint64_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. */ + ckhc_t *tab; +} ckh_t; + +/******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* Lifetime management. Minitems is the initial capacity. */ +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(tsd_t *tsd, ckh_t *ckh); + +/* Get the number of elements in the set. */ +size_t ckh_count(ckh_t *ckh); + +/* + * To iterate over the elements in the table, initialize *tabind to 0 and call + * this function until it returns true. Each call that returns false will + * update *key and *data to the next element in the table, assuming the pointers + * are non-NULL. + */ +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); + +/* + * Basic hash table operations -- insert, removal, lookup. For ckh_remove and + * ckh_search, key or data can be NULL. The hash-table only stores pointers to + * the key and value, and doesn't do any lifetime management. + */ +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); + +/* Some useful hash and comparison functions for strings and pointers. */ +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_INTERNAL_CKH_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h new file mode 100644 index 0000000..1d1aacc --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/ctl.h @@ -0,0 +1,134 @@ +#ifndef JEMALLOC_INTERNAL_CTL_H +#define JEMALLOC_INTERNAL_CTL_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/stats.h" + +/* Maximum ctl tree depth. */ +#define CTL_MAX_DEPTH 7 + +typedef struct ctl_node_s { + bool named; +} ctl_node_t; + +typedef struct ctl_named_node_s { + ctl_node_t node; + const char *name; + /* If (nchildren == 0), this is a terminal node. 
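A brief sketch (not part of the upstream patch) of the iteration protocol that ckh_iter() describes above, assuming ckh already points at a populated table; demo_ckh_count_iter is a hypothetical helper used only for illustration.

static size_t
demo_ckh_count_iter(ckh_t *ckh) {
	size_t tabind = 0;	/* Must start at 0. */
	void *key, *data;
	size_t n = 0;

	/* ckh_iter() returns false for each element, true once exhausted. */
	while (!ckh_iter(ckh, &tabind, &key, &data)) {
		n++;	/* key/data now hold the next stored pair. */
	}
	/* Absent concurrent inserts/removes, n == ckh_count(ckh). */
	return n;
}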
*/ + size_t nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, + size_t); +} ctl_named_node_t; + +typedef struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +} ctl_indexed_node_t; + +typedef struct ctl_arena_stats_s { + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + uint64_t nfills_small; + uint64_t nflushes_small; + + bin_stats_t bstats[SC_NBINS]; + arena_stats_large_t lstats[SC_NSIZES - SC_NBINS]; + arena_stats_extents_t estats[SC_NPSIZES]; +} ctl_arena_stats_t; + +typedef struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t metadata_thp; + size_t resident; + size_t mapped; + size_t retained; + + background_thread_stats_t background_thread; + mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; +} ctl_stats_t; + +typedef struct ctl_arena_s ctl_arena_t; +struct ctl_arena_s { + unsigned arena_ind; + bool initialized; + ql_elm(ctl_arena_t) destroyed_link; + + /* Basic stats, supported even if !config_stats. */ + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms; + ssize_t muzzy_decay_ms; + size_t pactive; + size_t pdirty; + size_t pmuzzy; + + /* NULL if !config_stats. */ + ctl_arena_stats_t *astats; +}; + +typedef struct ctl_arenas_s { + uint64_t epoch; + unsigned narenas; + ql_head(ctl_arena_t) destroyed; + + /* + * Element 0 corresponds to merged stats for extant arenas (accessed via + * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for + * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the + * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. 
+ */ + ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; +} ctl_arenas_t; + +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); + +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); +bool ctl_boot(void); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_printf( \ + ": Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlnametomib(name, mibp, miblenp) do { \ + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf(": Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ + newlen) != 0) { \ + malloc_write( \ + ": Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ +} while (0) + +#endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/div.h b/deps/jemalloc/include/jemalloc/internal/div.h new file mode 100644 index 0000000..aebae93 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/div.h @@ -0,0 +1,41 @@ +#ifndef JEMALLOC_INTERNAL_DIV_H +#define JEMALLOC_INTERNAL_DIV_H + +#include "jemalloc/internal/assert.h" + +/* + * This module does the division that computes the index of a region in a slab, + * given its offset relative to the base. + * That is, given a divisor d, an n = i * d (all integers), we'll return i. + * We do some pre-computation to do this more quickly than a CPU division + * instruction. + * We bound n < 2^32, and don't support dividing by one. + */ + +typedef struct div_info_s div_info_t; +struct div_info_s { + uint32_t magic; +#ifdef JEMALLOC_DEBUG + size_t d; +#endif +}; + +void div_init(div_info_t *div_info, size_t divisor); + +static inline size_t +div_compute(div_info_t *div_info, size_t n) { + assert(n <= (uint32_t)-1); + /* + * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine, + * the compilers I tried were all smart enough to turn this into the + * appropriate "get the high 32 bits of the result of a multiply" (e.g. + * mul; mov edx eax; on x86, umull on arm, etc.). + */ + size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32; +#ifdef JEMALLOC_DEBUG + assert(i * div_info->d == n); +#endif + return i; +} + +#endif /* JEMALLOC_INTERNAL_DIV_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/emitter.h b/deps/jemalloc/include/jemalloc/internal/emitter.h new file mode 100644 index 0000000..542bc79 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/emitter.h @@ -0,0 +1,486 @@ +#ifndef JEMALLOC_INTERNAL_EMITTER_H +#define JEMALLOC_INTERNAL_EMITTER_H + +#include "jemalloc/internal/ql.h" + +typedef enum emitter_output_e emitter_output_t; +enum emitter_output_e { + emitter_output_json, + emitter_output_table +}; + +typedef enum emitter_justify_e emitter_justify_t; +enum emitter_justify_e { + emitter_justify_left, + emitter_justify_right, + /* Not for users; just to pass to internal functions. 
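A standalone demonstration (not part of the upstream patch) of the multiply-and-shift trick that div_compute() above relies on: with magic = ceil(2^32 / d), every exact multiple n = i * d below 2^32 satisfies ((n * magic) >> 32) == i. The real precomputation lives in src/div.c; demo_div_magic here only illustrates the idea.

#include <assert.h>
#include <stdint.h>

static uint32_t
demo_div_magic(uint32_t d) {
	uint64_t two_to_32 = (uint64_t)1 << 32;
	uint32_t magic;

	assert(d > 1);			/* Dividing by one is unsupported. */
	magic = (uint32_t)(two_to_32 / d);
	if (two_to_32 % d != 0) {
		magic++;		/* Round up: ceil(2^32 / d). */
	}
	return magic;
}

int
main(void) {
	uint32_t d = 48;		/* E.g. a small region size. */
	uint32_t magic = demo_div_magic(d);

	for (uint32_t i = 0; i < 100000; i++) {
		uint64_t n = (uint64_t)i * d;	/* Offset of region i. */
		assert(((n * magic) >> 32) == i);
	}
	return 0;
}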
*/ + emitter_justify_none +}; + +typedef enum emitter_type_e emitter_type_t; +enum emitter_type_e { + emitter_type_bool, + emitter_type_int, + emitter_type_unsigned, + emitter_type_uint32, + emitter_type_uint64, + emitter_type_size, + emitter_type_ssize, + emitter_type_string, + /* + * A title is a column title in a table; it's just a string, but it's + * not quoted. + */ + emitter_type_title, +}; + +typedef struct emitter_col_s emitter_col_t; +struct emitter_col_s { + /* Filled in by the user. */ + emitter_justify_t justify; + int width; + emitter_type_t type; + union { + bool bool_val; + int int_val; + unsigned unsigned_val; + uint32_t uint32_val; + uint32_t uint32_t_val; + uint64_t uint64_val; + uint64_t uint64_t_val; + size_t size_val; + ssize_t ssize_val; + const char *str_val; + }; + + /* Filled in by initialization. */ + ql_elm(emitter_col_t) link; +}; + +typedef struct emitter_row_s emitter_row_t; +struct emitter_row_s { + ql_head(emitter_col_t) cols; +}; + +typedef struct emitter_s emitter_t; +struct emitter_s { + emitter_output_t output; + /* The output information. */ + void (*write_cb)(void *, const char *); + void *cbopaque; + int nesting_depth; + /* True if we've already emitted a value at the given depth. */ + bool item_at_depth; + /* True if we emitted a key and will emit corresponding value next. */ + bool emitted_key; +}; + +/* Internal convenience function. Write to the emitter the given string. */ +JEMALLOC_FORMAT_PRINTF(2, 3) +static inline void +emitter_printf(emitter_t *emitter, const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); + va_end(ap); +} + +static inline const char * JEMALLOC_FORMAT_ARG(3) +emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, + emitter_justify_t justify, int width) { + size_t written; + fmt_specifier++; + if (justify == emitter_justify_none) { + written = malloc_snprintf(out_fmt, out_size, + "%%%s", fmt_specifier); + } else if (justify == emitter_justify_left) { + written = malloc_snprintf(out_fmt, out_size, + "%%-%d%s", width, fmt_specifier); + } else { + written = malloc_snprintf(out_fmt, out_size, + "%%%d%s", width, fmt_specifier); + } + /* Only happens in case of bad format string, which *we* choose. */ + assert(written < out_size); + return out_fmt; +} + +/* + * Internal. Emit the given value type in the relevant encoding (so that the + * bool true gets mapped to json "true", but the string "true" gets mapped to + * json "\"true\"", for instance. + * + * Width is ignored if justify is emitter_justify_none. + */ +static inline void +emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, + emitter_type_t value_type, const void *value) { + size_t str_written; +#define BUF_SIZE 256 +#define FMT_SIZE 10 + /* + * We dynamically generate a format string to emit, to let us use the + * snprintf machinery. This is kinda hacky, but gets the job done + * quickly without having to think about the various snprintf edge + * cases. + */ + char fmt[FMT_SIZE]; + char buf[BUF_SIZE]; + +#define EMIT_SIMPLE(type, format) \ + emitter_printf(emitter, \ + emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width), \ + *(const type *)value); + + switch (value_type) { + case emitter_type_bool: + emitter_printf(emitter, + emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), + *(const bool *)value ? 
"true" : "false"); + break; + case emitter_type_int: + EMIT_SIMPLE(int, "%d") + break; + case emitter_type_unsigned: + EMIT_SIMPLE(unsigned, "%u") + break; + case emitter_type_ssize: + EMIT_SIMPLE(ssize_t, "%zd") + break; + case emitter_type_size: + EMIT_SIMPLE(size_t, "%zu") + break; + case emitter_type_string: + str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", + *(const char *const *)value); + /* + * We control the strings we output; we shouldn't get anything + * anywhere near the fmt size. + */ + assert(str_written < BUF_SIZE); + emitter_printf(emitter, + emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf); + break; + case emitter_type_uint32: + EMIT_SIMPLE(uint32_t, "%" FMTu32) + break; + case emitter_type_uint64: + EMIT_SIMPLE(uint64_t, "%" FMTu64) + break; + case emitter_type_title: + EMIT_SIMPLE(char *const, "%s"); + break; + default: + unreachable(); + } +#undef BUF_SIZE +#undef FMT_SIZE +} + + +/* Internal functions. In json mode, tracks nesting state. */ +static inline void +emitter_nest_inc(emitter_t *emitter) { + emitter->nesting_depth++; + emitter->item_at_depth = false; +} + +static inline void +emitter_nest_dec(emitter_t *emitter) { + emitter->nesting_depth--; + emitter->item_at_depth = true; +} + +static inline void +emitter_indent(emitter_t *emitter) { + int amount = emitter->nesting_depth; + const char *indent_str; + if (emitter->output == emitter_output_json) { + indent_str = "\t"; + } else { + amount *= 2; + indent_str = " "; + } + for (int i = 0; i < amount; i++) { + emitter_printf(emitter, "%s", indent_str); + } +} + +static inline void +emitter_json_key_prefix(emitter_t *emitter) { + if (emitter->emitted_key) { + emitter->emitted_key = false; + return; + } + emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : ""); + emitter_indent(emitter); +} + +/******************************************************************************/ +/* Public functions for emitter_t. */ + +static inline void +emitter_init(emitter_t *emitter, emitter_output_t emitter_output, + void (*write_cb)(void *, const char *), void *cbopaque) { + emitter->output = emitter_output; + emitter->write_cb = write_cb; + emitter->cbopaque = cbopaque; + emitter->item_at_depth = false; + emitter->emitted_key = false; + emitter->nesting_depth = 0; +} + +/******************************************************************************/ +/* JSON public API. */ + +/* + * Emits a key (e.g. as appears in an object). The next json entity emitted will + * be the corresponding value. + */ +static inline void +emitter_json_key(emitter_t *emitter, const char *json_key) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "\"%s\": ", json_key); + emitter->emitted_key = true; + } +} + +static inline void +emitter_json_value(emitter_t *emitter, emitter_type_t value_type, + const void *value) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_print_value(emitter, emitter_justify_none, -1, + value_type, value); + emitter->item_at_depth = true; + } +} + +/* Shorthand for calling emitter_json_key and then emitter_json_value. 
*/ +static inline void +emitter_json_kv(emitter_t *emitter, const char *json_key, + emitter_type_t value_type, const void *value) { + emitter_json_key(emitter, json_key); + emitter_json_value(emitter, value_type, value); +} + +static inline void +emitter_json_array_begin(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "["); + emitter_nest_inc(emitter); + } +} + +/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */ +static inline void +emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) { + emitter_json_key(emitter, json_key); + emitter_json_array_begin(emitter); +} + +static inline void +emitter_json_array_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + emitter_printf(emitter, "]"); + } +} + +static inline void +emitter_json_object_begin(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "{"); + emitter_nest_inc(emitter); + } +} + +/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */ +static inline void +emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) { + emitter_json_key(emitter, json_key); + emitter_json_object_begin(emitter); +} + +static inline void +emitter_json_object_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + emitter_printf(emitter, "}"); + } +} + + +/******************************************************************************/ +/* Table public API. */ + +static inline void +emitter_table_dict_begin(emitter_t *emitter, const char *table_key) { + if (emitter->output == emitter_output_table) { + emitter_indent(emitter); + emitter_printf(emitter, "%s\n", table_key); + emitter_nest_inc(emitter); + } +} + +static inline void +emitter_table_dict_end(emitter_t *emitter) { + if (emitter->output == emitter_output_table) { + emitter_nest_dec(emitter); + } +} + +static inline void +emitter_table_kv_note(emitter_t *emitter, const char *table_key, + emitter_type_t value_type, const void *value, + const char *table_note_key, emitter_type_t table_note_value_type, + const void *table_note_value) { + if (emitter->output == emitter_output_table) { + emitter_indent(emitter); + emitter_printf(emitter, "%s: ", table_key); + emitter_print_value(emitter, emitter_justify_none, -1, + value_type, value); + if (table_note_key != NULL) { + emitter_printf(emitter, " (%s: ", table_note_key); + emitter_print_value(emitter, emitter_justify_none, -1, + table_note_value_type, table_note_value); + emitter_printf(emitter, ")"); + } + emitter_printf(emitter, "\n"); + } + emitter->item_at_depth = true; +} + +static inline void +emitter_table_kv(emitter_t *emitter, const char *table_key, + emitter_type_t value_type, const void *value) { + emitter_table_kv_note(emitter, table_key, value_type, value, NULL, + emitter_type_bool, NULL); +} + + +/* Write to the emitter the given string, but only in table mode. */ +JEMALLOC_FORMAT_PRINTF(2, 3) +static inline void +emitter_table_printf(emitter_t *emitter, const char *format, ...) 
{ + if (emitter->output == emitter_output_table) { + va_list ap; + va_start(ap, format); + malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); + va_end(ap); + } +} + +static inline void +emitter_table_row(emitter_t *emitter, emitter_row_t *row) { + if (emitter->output != emitter_output_table) { + return; + } + emitter_col_t *col; + ql_foreach(col, &row->cols, link) { + emitter_print_value(emitter, col->justify, col->width, + col->type, (const void *)&col->bool_val); + } + emitter_table_printf(emitter, "\n"); +} + +static inline void +emitter_row_init(emitter_row_t *row) { + ql_new(&row->cols); +} + +static inline void +emitter_col_init(emitter_col_t *col, emitter_row_t *row) { + ql_elm_new(col, link); + ql_tail_insert(&row->cols, col, link); +} + + +/******************************************************************************/ +/* + * Generalized public API. Emits using either JSON or table, according to + * settings in the emitter_t. */ + +/* + * Note emits a different kv pair as well, but only in table mode. Omits the + * note if table_note_key is NULL. + */ +static inline void +emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, + emitter_type_t value_type, const void *value, + const char *table_note_key, emitter_type_t table_note_value_type, + const void *table_note_value) { + if (emitter->output == emitter_output_json) { + emitter_json_key(emitter, json_key); + emitter_json_value(emitter, value_type, value); + } else { + emitter_table_kv_note(emitter, table_key, value_type, value, + table_note_key, table_note_value_type, table_note_value); + } + emitter->item_at_depth = true; +} + +static inline void +emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, + emitter_type_t value_type, const void *value) { + emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL, + emitter_type_bool, NULL); +} + +static inline void +emitter_dict_begin(emitter_t *emitter, const char *json_key, + const char *table_header) { + if (emitter->output == emitter_output_json) { + emitter_json_key(emitter, json_key); + emitter_json_object_begin(emitter); + } else { + emitter_table_dict_begin(emitter, table_header); + } +} + +static inline void +emitter_dict_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + emitter_json_object_end(emitter); + } else { + emitter_table_dict_end(emitter); + } +} + +static inline void +emitter_begin(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth == 0); + emitter_printf(emitter, "{"); + emitter_nest_inc(emitter); + } else { + /* + * This guarantees that we always call write_cb at least once. + * This is useful if some invariant is established by each call + * to write_cb, but doesn't hold initially: e.g., some buffer + * holds a null-terminated string. 
+ */ + emitter_printf(emitter, "%s", ""); + } +} + +static inline void +emitter_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth == 1); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n}\n"); + } +} + +#endif /* JEMALLOC_INTERNAL_EMITTER_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_dss.h b/deps/jemalloc/include/jemalloc/internal/extent_dss.h new file mode 100644 index 0000000..e8f02ce --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_dss.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H +#define JEMALLOC_INTERNAL_EXTENT_DSS_H + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +extern const char *dss_prec_names[]; + +extern const char *opt_dss; + +dss_prec_t extent_dss_prec_get(void); +bool extent_dss_prec_set(dss_prec_t dss_prec); +void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool extent_in_dss(void *addr); +bool extent_dss_mergeable(void *addr_a, void *addr_b); +void extent_dss_boot(void); + +#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_externs.h b/deps/jemalloc/include/jemalloc/internal/extent_externs.h new file mode 100644 index 0000000..8aba576 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_externs.h @@ -0,0 +1,83 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" + +extern size_t opt_lg_extent_max_active_fit; + +extern rtree_t extents_rtree; +extern const extent_hooks_t extent_hooks_default; +extern mutex_pool_t extent_mutex_pool; + +extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); +void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); + +extent_hooks_t *extent_hooks_get(arena_t *arena); +extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena, + extent_hooks_t *extent_hooks); + +#ifdef JEMALLOC_JET +size_t extent_size_quantize_floor(size_t size); +size_t extent_size_quantize_ceil(size_t size); +#endif + +ph_proto(, extent_avail_, extent_tree_t, extent_t) +ph_proto(, extent_heap_, extent_heap_t, extent_t) + +bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce); +extent_state_t extents_state_get(const extents_t *extents); +size_t extents_npages_get(extents_t *extents); +/* Get the number of extents in the given page size index. */ +size_t extents_nextents_get(extents_t *extents, pszind_t ind); +/* Get the sum total bytes of the extents in the given page size index. 
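A sketch (not part of the upstream patch) of driving the emitter API defined above, writing the same two key/value pairs as either JSON or a table; demo_write_cb and the sample keys are inventions of this example, and any callback matching void (*)(void *, const char *) would do.

#include <stdbool.h>
#include <stdio.h>

static void
demo_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

static void
demo_emit(bool as_json) {
	emitter_t emitter;
	const char *version = "demo";
	unsigned nthreads = 4;

	emitter_init(&emitter, as_json ? emitter_output_json :
	    emitter_output_table, demo_write_cb, stderr);
	emitter_begin(&emitter);
	/* First key names the JSON field, second the table label. */
	emitter_kv(&emitter, "version", "Version", emitter_type_string,
	    &version);
	emitter_kv(&emitter, "nthreads", "Threads", emitter_type_unsigned,
	    &nthreads);
	emitter_end(&emitter);
}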
*/ +size_t extents_nbytes_get(extents_t *extents, pszind_t ind); +extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit); +void extents_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); +extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); +void extents_prefork(tsdn_t *tsdn, extents_t *extents); +void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); +void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); +extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); +void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); +void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); +bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); + +bool extent_boot(void); + +void extent_util_stats_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size); +void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, + size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr); + +#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_inlines.h b/deps/jemalloc/include/jemalloc/internal/extent_inlines.h new file mode 100644 index 0000000..77fa4c4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_inlines.h @@ -0,0 +1,501 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H +#define JEMALLOC_INTERNAL_EXTENT_INLINES_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/sz.h" + +static inline void +extent_lock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_unlock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { 
+ assert(extent1 != NULL && extent2 != NULL); + mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline void +extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { + assert(extent1 != NULL && extent2 != NULL); + mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline unsigned +extent_arena_ind_get(const extent_t *extent) { + unsigned arena_ind = (unsigned)((extent->e_bits & + EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); + assert(arena_ind < MALLOCX_ARENA_LIMIT); + + return arena_ind; +} + +static inline arena_t * +extent_arena_get(const extent_t *extent) { + unsigned arena_ind = extent_arena_ind_get(extent); + + return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); +} + +static inline szind_t +extent_szind_get_maybe_invalid(const extent_t *extent) { + szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> + EXTENT_BITS_SZIND_SHIFT); + assert(szind <= SC_NSIZES); + return szind; +} + +static inline szind_t +extent_szind_get(const extent_t *extent) { + szind_t szind = extent_szind_get_maybe_invalid(extent); + assert(szind < SC_NSIZES); /* Never call when "invalid". */ + return szind; +} + +static inline size_t +extent_usize_get(const extent_t *extent) { + return sz_index2size(extent_szind_get(extent)); +} + +static inline unsigned +extent_binshard_get(const extent_t *extent) { + unsigned binshard = (unsigned)((extent->e_bits & + EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT); + assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); + return binshard; +} + +static inline size_t +extent_sn_get(const extent_t *extent) { + return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> + EXTENT_BITS_SN_SHIFT); +} + +static inline extent_state_t +extent_state_get(const extent_t *extent) { + return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> + EXTENT_BITS_STATE_SHIFT); +} + +static inline bool +extent_zeroed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> + EXTENT_BITS_ZEROED_SHIFT); +} + +static inline bool +extent_committed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> + EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline bool +extent_dumpable_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >> + EXTENT_BITS_DUMPABLE_SHIFT); +} + +static inline bool +extent_slab_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> + EXTENT_BITS_SLAB_SHIFT); +} + +static inline unsigned +extent_nfree_get(const extent_t *extent) { + assert(extent_slab_get(extent)); + return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> + EXTENT_BITS_NFREE_SHIFT); +} + +static inline void * +extent_base_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return PAGE_ADDR2BASE(extent->e_addr); +} + +static inline void * +extent_addr_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return extent->e_addr; +} + +static inline size_t +extent_size_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_SIZE_MASK); +} + +static inline size_t +extent_esn_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_ESN_MASK); +} + +static inline size_t +extent_bsize_get(const extent_t *extent) { + return 
extent->e_bsize; +} + +static inline void * +extent_before_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) - PAGE); +} + +static inline void * +extent_last_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent) - PAGE); +} + +static inline void * +extent_past_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent)); +} + +static inline arena_slab_data_t * +extent_slab_data_get(extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline const arena_slab_data_t * +extent_slab_data_get_const(const extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline prof_tctx_t * +extent_prof_tctx_get(const extent_t *extent) { + return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, + ATOMIC_ACQUIRE); +} + +static inline nstime_t +extent_prof_alloc_time_get(const extent_t *extent) { + return extent->e_alloc_time; +} + +static inline void +extent_arena_set(extent_t *extent, arena_t *arena) { + unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U << + MALLOCX_ARENA_BITS) - 1); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | + ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); +} + +static inline void +extent_binshard_set(extent_t *extent, unsigned binshard) { + /* The assertion assumes szind is set already. */ + assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) | + ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT); +} + +static inline void +extent_addr_set(extent_t *extent, void *addr) { + extent->e_addr = addr; +} + +static inline void +extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) { + assert(extent_base_get(extent) == extent_addr_get(extent)); + + if (alignment < PAGE) { + unsigned lg_range = LG_PAGE - + lg_floor(CACHELINE_CEILING(alignment)); + size_t r; + if (!tsdn_null(tsdn)) { + tsd_t *tsd = tsdn_tsd(tsdn); + r = (size_t)prng_lg_range_u64( + tsd_offset_statep_get(tsd), lg_range); + } else { + r = prng_lg_range_zu( + &extent_arena_get(extent)->offset_state, + lg_range, true); + } + uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - + lg_range); + extent->e_addr = (void *)((uintptr_t)extent->e_addr + + random_offset); + assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == + extent->e_addr); + } +} + +static inline void +extent_size_set(extent_t *extent, size_t size) { + assert((size & ~EXTENT_SIZE_MASK) == 0); + extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); +} + +static inline void +extent_esn_set(extent_t *extent, size_t esn) { + extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & + EXTENT_ESN_MASK); +} + +static inline void +extent_bsize_set(extent_t *extent, size_t bsize) { + extent->e_bsize = bsize; +} + +static inline void +extent_szind_set(extent_t *extent, szind_t szind) { + assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". 
*/ + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | + ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); +} + +static inline void +extent_nfree_set(extent_t *extent, unsigned nfree) { + assert(extent_slab_get(extent)); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | + ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) { + /* The assertion assumes szind is set already. */ + assert(binshard < bin_infos[extent_szind_get(extent)].n_shards); + extent->e_bits = (extent->e_bits & + (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) | + ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) | + ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_inc(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_dec(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_sub(extent_t *extent, uint64_t n) { + assert(extent_slab_get(extent)); + extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_sn_set(extent_t *extent, size_t sn) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | + ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); +} + +static inline void +extent_state_set(extent_t *extent, extent_state_t state) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | + ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); +} + +static inline void +extent_zeroed_set(extent_t *extent, bool zeroed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | + ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); +} + +static inline void +extent_committed_set(extent_t *extent, bool committed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | + ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline void +extent_dumpable_set(extent_t *extent, bool dumpable) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) | + ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT); +} + +static inline void +extent_slab_set(extent_t *extent, bool slab) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | + ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); +} + +static inline void +extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { + atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE); +} + +static inline void +extent_prof_alloc_time_set(extent_t *extent, nstime_t t) { + nstime_copy(&extent->e_alloc_time, &t); +} + +static inline bool +extent_is_head_get(extent_t *extent) { + if (maps_coalesce) { + not_reached(); + } + + return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >> + EXTENT_BITS_IS_HEAD_SHIFT); +} + +static inline void +extent_is_head_set(extent_t *extent, bool is_head) { + if (maps_coalesce) { + not_reached(); + } + + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) | + ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT); +} + +static inline void +extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, + bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, + bool committed, bool dumpable, extent_head_state_t is_head) { + assert(addr == PAGE_ADDR2BASE(addr) || !slab); + + extent_arena_set(extent, arena); + extent_addr_set(extent, addr); + extent_size_set(extent, size); + extent_slab_set(extent, slab); + 
extent_szind_set(extent, szind); + extent_sn_set(extent, sn); + extent_state_set(extent, state); + extent_zeroed_set(extent, zeroed); + extent_committed_set(extent, committed); + extent_dumpable_set(extent, dumpable); + ql_elm_new(extent, ql_link); + if (!maps_coalesce) { + extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true : + false); + } + if (config_prof) { + extent_prof_tctx_set(extent, NULL); + } +} + +static inline void +extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { + extent_arena_set(extent, NULL); + extent_addr_set(extent, addr); + extent_bsize_set(extent, bsize); + extent_slab_set(extent, false); + extent_szind_set(extent, SC_NSIZES); + extent_sn_set(extent, sn); + extent_state_set(extent, extent_state_active); + extent_zeroed_set(extent, true); + extent_committed_set(extent, true); + extent_dumpable_set(extent, true); +} + +static inline void +extent_list_init(extent_list_t *list) { + ql_new(list); +} + +static inline extent_t * +extent_list_first(const extent_list_t *list) { + return ql_first(list); +} + +static inline extent_t * +extent_list_last(const extent_list_t *list) { + return ql_last(list, ql_link); +} + +static inline void +extent_list_append(extent_list_t *list, extent_t *extent) { + ql_tail_insert(list, extent, ql_link); +} + +static inline void +extent_list_prepend(extent_list_t *list, extent_t *extent) { + ql_head_insert(list, extent, ql_link); +} + +static inline void +extent_list_replace(extent_list_t *list, extent_t *to_remove, + extent_t *to_insert) { + ql_after_insert(to_remove, to_insert, ql_link); + ql_remove(list, to_remove, ql_link); +} + +static inline void +extent_list_remove(extent_list_t *list, extent_t *extent) { + ql_remove(list, extent, ql_link); +} + +static inline int +extent_sn_comp(const extent_t *a, const extent_t *b) { + size_t a_sn = extent_sn_get(a); + size_t b_sn = extent_sn_get(b); + + return (a_sn > b_sn) - (a_sn < b_sn); +} + +static inline int +extent_esn_comp(const extent_t *a, const extent_t *b) { + size_t a_esn = extent_esn_get(a); + size_t b_esn = extent_esn_get(b); + + return (a_esn > b_esn) - (a_esn < b_esn); +} + +static inline int +extent_ad_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_addr = (uintptr_t)extent_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_addr_get(b); + + return (a_addr > b_addr) - (a_addr < b_addr); +} + +static inline int +extent_ead_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_eaddr = (uintptr_t)a; + uintptr_t b_eaddr = (uintptr_t)b; + + return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); +} + +static inline int +extent_snad_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_sn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ad_comp(a, b); + return ret; +} + +static inline int +extent_esnead_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_esn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ead_comp(a, b); + return ret; +} + +#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_mmap.h b/deps/jemalloc/include/jemalloc/internal/extent_mmap.h new file mode 100644 index 0000000..55f17ee --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_mmap.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H + +extern bool opt_retain; + +void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +bool 
extent_dalloc_mmap(void *addr, size_t size); + +#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_structs.h b/deps/jemalloc/include/jemalloc/internal/extent_structs.h new file mode 100644 index 0000000..767cd89 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_structs.h @@ -0,0 +1,256 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H +#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/sc.h" + +typedef enum { + extent_state_active = 0, + extent_state_dirty = 1, + extent_state_muzzy = 2, + extent_state_retained = 3 +} extent_state_t; + +/* Extent (span of pages). Use accessor functions for e_* fields. */ +struct extent_s { + /* + * Bitfield containing several fields: + * + * a: arena_ind + * b: slab + * c: committed + * d: dumpable + * z: zeroed + * t: state + * i: szind + * f: nfree + * s: bin_shard + * n: sn + * + * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa + * + * arena_ind: Arena from which this extent came, or all 1 bits if + * unassociated. + * + * slab: The slab flag indicates whether the extent is used for a slab + * of small regions. This helps differentiate small size classes, + * and it indicates whether interior pointers can be looked up via + * iealloc(). + * + * committed: The committed flag indicates whether physical memory is + * committed to the extent, whether explicitly or implicitly + * as on a system that overcommits and satisfies physical + * memory needs on demand via soft page faults. + * + * dumpable: The dumpable flag indicates whether or not we've set the + * memory in question to be dumpable. Note that this + * interacts somewhat subtly with user-specified extent hooks, + * since we don't know if *they* are fiddling with + * dumpability (in which case, we don't want to undo whatever + * they're doing). To deal with this scenario, we: + * - Make dumpable false only for memory allocated with the + * default hooks. + * - Only allow memory to go from non-dumpable to dumpable, + * and only once. + * - Never make the OS call to allow dumping when the + * dumpable bit is already set. + * These three constraints mean that we will never + * accidentally dump user memory that the user meant to set + * nondumpable with their extent hooks. + * + * + * zeroed: The zeroed flag is used by extent recycling code to track + * whether memory is zero-filled. + * + * state: The state flag is an extent_state_t. + * + * szind: The szind flag indicates usable size class index for + * allocations residing in this extent, regardless of whether the + * extent is a slab. Extent size and usable size often differ + * even for non-slabs, either due to sz_large_pad or promotion of + * sampled small regions. + * + * nfree: Number of free regions in slab. + * + * bin_shard: the shard of the bin from which this extent came. + * + * sn: Serial number (potentially non-unique). + * + * Serial numbers may wrap around if !opt_retain, but as long as + * comparison functions fall back on address comparison for equal + * serial numbers, stable (if imperfect) ordering is maintained. + * + * Serial numbers may not be unique even in the absence of + * wrap-around, e.g. 
when splitting an extent and assigning the same + * serial number to both resulting adjacent extents. + */ + uint64_t e_bits; +#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) + +#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS +#define EXTENT_BITS_ARENA_SHIFT 0 +#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT) + +#define EXTENT_BITS_SLAB_WIDTH 1 +#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT) +#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT) + +#define EXTENT_BITS_COMMITTED_WIDTH 1 +#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT) +#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT) + +#define EXTENT_BITS_DUMPABLE_WIDTH 1 +#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT) +#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT) + +#define EXTENT_BITS_ZEROED_WIDTH 1 +#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT) +#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT) + +#define EXTENT_BITS_STATE_WIDTH 2 +#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT) +#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT) + +#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES) +#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT) +#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT) + +#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1) +#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT) +#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT) + +#define EXTENT_BITS_BINSHARD_WIDTH 6 +#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) +#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT) + +#define EXTENT_BITS_IS_HEAD_WIDTH 1 +#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT) +#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT) + +#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT) +#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) + + /* Pointer to the extent that this structure is responsible for. */ + void *e_addr; + + union { + /* + * Extent size and serial number associated with the extent + * structure (different than the serial number for the extent at + * e_addr). + * + * ssssssss [...] ssssssss ssssnnnn nnnnnnnn + */ + size_t e_size_esn; + #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) + #define EXTENT_ESN_MASK ((size_t)PAGE-1) + /* Base extent size, which may not be a multiple of PAGE. */ + size_t e_bsize; + }; + + /* + * List linkage, used by a variety of lists: + * - bin_t's slabs_full + * - extents_t's LRU + * - stashed dirty extents + * - arena's large allocations + */ + ql_elm(extent_t) ql_link; + + /* + * Linkage for per size class sn/address-ordered heaps, and + * for extent_avail + */ + phn(extent_t) ph_link; + + union { + /* Small region slab metadata. */ + arena_slab_data_t e_slab_data; + + /* Profiling data, used for large objects. 
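A standalone illustration (not part of the upstream patch) of how the MASK() width/shift scheme above packs one field into a 64-bit word; the DEMO_FIELD width and shift are arbitrary stand-ins, whereas the real shifts accumulate from the preceding field widths and the real accessors are the extent_*_get()/extent_*_set() inlines in extent_inlines.h.

#include <assert.h>
#include <stdint.h>

#define DEMO_MASK(width, shift)						\
    (((((uint64_t)0x1U << (width)) - 1)) << (shift))

#define DEMO_FIELD_WIDTH	2
#define DEMO_FIELD_SHIFT	14
#define DEMO_FIELD_MASK		DEMO_MASK(DEMO_FIELD_WIDTH, DEMO_FIELD_SHIFT)

int
main(void) {
	uint64_t e_bits = 0;

	/* Set the 2-bit field to 3 without disturbing neighboring fields. */
	e_bits = (e_bits & ~DEMO_FIELD_MASK) |
	    ((uint64_t)3 << DEMO_FIELD_SHIFT);
	/* Read it back by masking and shifting down. */
	assert(((e_bits & DEMO_FIELD_MASK) >> DEMO_FIELD_SHIFT) == 3);
	return 0;
}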
*/ + struct { + /* Time when this was allocated. */ + nstime_t e_alloc_time; + /* Points to a prof_tctx_t. */ + atomic_p_t e_prof_tctx; + }; + }; +}; +typedef ql_head(extent_t) extent_list_t; +typedef ph(extent_t) extent_tree_t; +typedef ph(extent_t) extent_heap_t; + +/* Quantized collection of extents, with built-in LRU queue. */ +struct extents_s { + malloc_mutex_t mtx; + + /* + * Quantized per size class heaps of extents. + * + * Synchronization: mtx. + */ + extent_heap_t heaps[SC_NPSIZES + 1]; + atomic_zu_t nextents[SC_NPSIZES + 1]; + atomic_zu_t nbytes[SC_NPSIZES + 1]; + + /* + * Bitmap for which set bits correspond to non-empty heaps. + * + * Synchronization: mtx. + */ + bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)]; + + /* + * LRU of all extents in heaps. + * + * Synchronization: mtx. + */ + extent_list_t lru; + + /* + * Page sum for all extents in heaps. + * + * The synchronization here is a little tricky. Modifications to npages + * must hold mtx, but reads need not (though, a reader who sees npages + * without holding the mutex can't assume anything about the rest of the + * state of the extents_t). + */ + atomic_zu_t npages; + + /* All stored extents must be in the same state. */ + extent_state_t state; + + /* + * If true, delay coalescing until eviction; otherwise coalesce during + * deallocation. + */ + bool delay_coalesce; +}; + +/* + * The following two structs are for experimental purposes. See + * experimental_utilization_query_ctl and + * experimental_utilization_batch_query_ctl in src/ctl.c. + */ + +struct extent_util_stats_s { + size_t nfree; + size_t nregs; + size_t size; +}; + +struct extent_util_stats_verbose_s { + void *slabcur_addr; + size_t nfree; + size_t nregs; + size_t size; + size_t bin_nfree; + size_t bin_nregs; +}; + +#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/extent_types.h b/deps/jemalloc/include/jemalloc/internal/extent_types.h new file mode 100644 index 0000000..96925cf --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/extent_types.h @@ -0,0 +1,23 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H +#define JEMALLOC_INTERNAL_EXTENT_TYPES_H + +typedef struct extent_s extent_t; +typedef struct extents_s extents_t; + +typedef struct extent_util_stats_s extent_util_stats_t; +typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t; + +#define EXTENT_HOOKS_INITIALIZER NULL + +/* + * When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit) + * is the max ratio between the size of the active extent and the new extent. + */ +#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 + +typedef enum { + EXTENT_NOT_HEAD, + EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */ +} extent_head_state_t; + +#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h new file mode 100644 index 0000000..0270034 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/hash.h @@ -0,0 +1,319 @@ +#ifndef JEMALLOC_INTERNAL_HASH_H +#define JEMALLOC_INTERNAL_HASH_H + +#include "jemalloc/internal/assert.h" + +/* + * The following hash function is based on MurmurHash3, placed into the public + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for + * details. + */ + +/******************************************************************************/ +/* Internal implementation. 
*/ +static inline uint32_t +hash_rotl_32(uint32_t x, int8_t r) { + return ((x << r) | (x >> (32 - r))); +} + +static inline uint64_t +hash_rotl_64(uint64_t x, int8_t r) { + return ((x << r) | (x >> (64 - r))); +} + +static inline uint32_t +hash_get_block_32(const uint32_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return ret; + } + + return p[i]; +} + +static inline uint64_t +hash_get_block_64(const uint64_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; + + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return ret; + } + + return p[i]; +} + +static inline uint32_t +hash_fmix_32(uint32_t h) { + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +static inline uint64_t +hash_fmix_64(uint64_t k) { + k ^= k >> 33; + k *= KQU(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= KQU(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return k; +} + +static inline uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i); + + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = hash_rotl_32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH + case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); + k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; + + h1 = hash_fmix_32(h1); + + return h1; +} + +static inline void +hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]) { + const uint8_t * data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_32(h1, 19); h1 += h2; + h1 = h1*5 + 0x561ccd1b; + + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = hash_rotl_32(h2, 17); h2 += h3; + h2 = h2*5 + 0x0bcaa747; + + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = hash_rotl_32(h3, 15); h3 += h4; + h3 = h3*5 + 0x96cd1c35; + + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = hash_rotl_32(h4, 13); h4 += h1; + h4 = h4*5 + 0x32ac3b17; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*16); + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { + case 15: k4 ^= tail[14] << 16; 
JEMALLOC_FALLTHROUGH + case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + JEMALLOC_FALLTHROUGH + case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH + case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH + case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + JEMALLOC_FALLTHROUGH + case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH + case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH + case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + JEMALLOC_FALLTHROUGH + case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH + case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + JEMALLOC_FALLTHROUGH + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = hash_fmix_32(h1); + h2 = hash_fmix_32(h2); + h3 = hash_fmix_32(h3); + h4 = hash_fmix_32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + r_out[0] = (((uint64_t) h2) << 32) | h1; + r_out[1] = (((uint64_t) h4) << 32) | h3; +} + +static inline void +hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); + + /* body */ + { + const uint64_t *blocks = (const uint64_t *) (data); + int i; + + for (i = 0; i < nblocks; i++) { + uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_64(h1, 27); h1 += h2; + h1 = h1*5 + 0x52dce729; + + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = hash_rotl_64(h2, 31); h2 += h1; + h2 = h2*5 + 0x38495ab5; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t*)(data + nblocks*16); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH + case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + JEMALLOC_FALLTHROUGH + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH + case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; 
+ + h1 = hash_fmix_64(h1); + h2 = hash_fmix_64(h2); + + h1 += h2; + h2 += h1; + + r_out[0] = h1; + r_out[1] = h2; +} + +/******************************************************************************/ +/* API. */ +static inline void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { + assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ + +#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) + hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); +#else + { + uint64_t hashes[2]; + hash_x86_128(key, (int)len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; + } +#endif +} + +#endif /* JEMALLOC_INTERNAL_HASH_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/hook.h b/deps/jemalloc/include/jemalloc/internal/hook.h new file mode 100644 index 0000000..ee246b1 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/hook.h @@ -0,0 +1,163 @@ +#ifndef JEMALLOC_INTERNAL_HOOK_H +#define JEMALLOC_INTERNAL_HOOK_H + +#include "jemalloc/internal/tsd.h" + +/* + * This API is *extremely* experimental, and may get ripped out, changed in API- + * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc. + * + * It allows hooking the stateful parts of the API to see changes as they + * happen. + * + * Allocation hooks are called after the allocation is done, free hooks are + * called before the free is done, and expand hooks are called after the + * allocation is expanded. + * + * For realloc and rallocx, if the expansion happens in place, the expansion + * hook is called. If it is moved, then the alloc hook is called on the new + * location, and then the free hook is called on the old location (i.e. both + * hooks are invoked in between the alloc and the dalloc). + * + * If we return NULL from OOM, then usize might not be trustworthy. Calling + * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0) + * only calls the free hook. (Calling realloc(NULL, 0) is treated as malloc(0), + * and only calls the alloc hook). + * + * Reentrancy: + * Reentrancy is guarded against from within the hook implementation. If you + * call allocator functions from within a hook, the hooks will not be invoked + * again. + * Threading: + * The installation of a hook synchronizes with all its uses. If you can + * prove the installation of a hook happens-before a jemalloc entry point, + * then the hook will get invoked (unless there's a racing removal). + * + * Hook insertion appears to be atomic at a per-thread level (i.e. if a thread + * allocates and has the alloc hook invoked, then a subsequent free on the + * same thread will also have the free hook invoked). + * + * The *removal* of a hook does *not* block until all threads are done with + * the hook. Hook authors have to be resilient to this, and need some + * out-of-band mechanism for cleaning up any dynamically allocated memory + * associated with their hook. + * Ordering: + * Order of hook execution is unspecified, and may be different than insertion + * order. + */ + +#define HOOK_MAX 4 + +enum hook_alloc_e { + hook_alloc_malloc, + hook_alloc_posix_memalign, + hook_alloc_aligned_alloc, + hook_alloc_calloc, + hook_alloc_memalign, + hook_alloc_valloc, + hook_alloc_mallocx, + + /* The reallocating functions have both alloc and dalloc variants */ + hook_alloc_realloc, + hook_alloc_rallocx, +}; +/* + * We put the enum typedef after the enum, since this file may get included by + * jemalloc_cpp.cpp, and C++ disallows enum forward declarations. 
+ */ +typedef enum hook_alloc_e hook_alloc_t; + +enum hook_dalloc_e { + hook_dalloc_free, + hook_dalloc_dallocx, + hook_dalloc_sdallocx, + + /* + * The dalloc halves of reallocation (not called if in-place expansion + * happens). + */ + hook_dalloc_realloc, + hook_dalloc_rallocx, +}; +typedef enum hook_dalloc_e hook_dalloc_t; + + +enum hook_expand_e { + hook_expand_realloc, + hook_expand_rallocx, + hook_expand_xallocx, +}; +typedef enum hook_expand_e hook_expand_t; + +typedef void (*hook_alloc)( + void *extra, hook_alloc_t type, void *result, uintptr_t result_raw, + uintptr_t args_raw[3]); + +typedef void (*hook_dalloc)( + void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]); + +typedef void (*hook_expand)( + void *extra, hook_expand_t type, void *address, size_t old_usize, + size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); + +typedef struct hooks_s hooks_t; +struct hooks_s { + hook_alloc alloc_hook; + hook_dalloc dalloc_hook; + hook_expand expand_hook; + void *extra; +}; + +/* + * Begin implementation details; everything above this point might one day live + * in a public API. Everything below this point never will. + */ + +/* + * The realloc pathways haven't gotten any refactoring love in a while, and it's + * fairly difficult to pass information from the entry point to the hooks. We + * put the information the hooks will need into a struct to encapsulate + * everything. + * + * Many of these pathways are force-inlined, so that the compiler can avoid + * materializing this struct until we hit an extern arena function. For fairly + * goofy reasons, *many* of the realloc paths hit an extern arena function. + * These paths are cold enough that it doesn't matter; eventually, we should + * rewrite the realloc code to make the expand-in-place and the + * free-then-realloc paths more orthogonal, at which point we don't need to + * spread the hook logic all over the place. + */ +typedef struct hook_ralloc_args_s hook_ralloc_args_t; +struct hook_ralloc_args_s { + /* I.e. as opposed to rallocx. */ + bool is_realloc; + /* + * The expand hook takes 4 arguments, even if only 3 are actually used; + * we add an extra one in case the user decides to memcpy without + * looking too closely at the hooked function. + */ + uintptr_t args[4]; +}; + +/* + * Returns an opaque handle to be used when removing the hook. NULL means that + * we couldn't install the hook. + */ +bool hook_boot(); + +void *hook_install(tsdn_t *tsdn, hooks_t *hooks); +/* Uninstalls the hook with the handle previously returned from hook_install.
*/ +void hook_remove(tsdn_t *tsdn, void *opaque); + +/* Hooks */ + +void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, + uintptr_t args_raw[3]); + +void hook_invoke_dalloc(hook_dalloc_t type, void *address, + uintptr_t args_raw[3]); + +void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, + size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]); + +#endif /* JEMALLOC_INTERNAL_HOOK_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h new file mode 100644 index 0000000..7d6053e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_DECLS_H +#define JEMALLOC_INTERNAL_DECLS_H + +#include +#ifdef _WIN32 +# include +# include "msvc_compat/windows_extra.h" +# ifdef _WIN64 +# if LG_VADDR <= 32 +# error Generate the headers using x64 vcargs +# endif +# else +# if LG_VADDR > 32 +# undef LG_VADDR +# define LG_VADDR 32 +# endif +# endif +#else +# include +# include +# if !defined(__pnacl__) && !defined(__native_client__) +# include +# if !defined(SYS_write) && defined(__NR_write) +# define SYS_write __NR_write +# endif +# if defined(SYS_open) && defined(__aarch64__) + /* Android headers may define SYS_open to __NR_open even though + * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ +# undef SYS_open +# endif +# include +# endif +# include +# ifdef __FreeBSD__ +# include +# endif +# include +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include +# endif +# include +# include +# include +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include +# endif +#endif +#include + +#include +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#ifndef SSIZE_MAX +# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) +#endif +#include +#include +#include +#include +#include +#include +#ifndef offsetof +# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) +#endif +#include +#include +#include +#ifdef _MSC_VER +# include +typedef intptr_t ssize_t; +# define PATH_MAX 1024 +# define STDERR_FILENO 2 +# define __func__ __FUNCTION__ +# ifdef JEMALLOC_HAS_RESTRICT +# define restrict __restrict +# endif +/* Disable warnings about deprecated system functions. */ +# pragma warning(disable: 4996) +#if _MSC_VER < 1800 +static int +isblank(int c) { + return (c == '\t' || c == ' '); +} +#endif +#else +# include +#endif +#include + +#endif /* JEMALLOC_INTERNAL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in new file mode 100644 index 0000000..c442a21 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -0,0 +1,366 @@ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +#undef JEMALLOC_PREFIX +#undef JEMALLOC_CPREFIX + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. 
+ */ +#undef JEMALLOC_OVERRIDE___LIBC_CALLOC +#undef JEMALLOC_OVERRIDE___LIBC_FREE +#undef JEMALLOC_OVERRIDE___LIBC_MALLOC +#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#undef JEMALLOC_OVERRIDE___LIBC_REALLOC +#undef JEMALLOC_OVERRIDE___LIBC_VALLOC +#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#undef JEMALLOC_PRIVATE_NAMESPACE + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#undef CPU_SPINWAIT +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#undef HAVE_CPU_SPINWAIT + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#undef LG_VADDR + +/* Defined if C11 atomics are available. */ +#undef JEMALLOC_C11_ATOMICS + +/* Defined if GCC __atomic atomics are available. */ +#undef JEMALLOC_GCC_ATOMIC_ATOMICS +/* and the 8-bit variant support. */ +#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS + +/* Defined if GCC __sync atomics are available. */ +#undef JEMALLOC_GCC_SYNC_ATOMICS +/* and the 8-bit variant support. */ +#undef JEMALLOC_GCC_U8_SYNC_ATOMICS + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#undef JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +#undef JEMALLOC_OS_UNFAIR_LOCK + +/* Defined if syscall(2) is usable. */ +#undef JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +#undef JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +#undef JEMALLOC_HAVE_ISSETUGID + +/* Defined if pthread_atfork(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC + +/* + * Defined if mach_absolute_time() is available. + */ +#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +#undef JEMALLOC_MALLOC_THREAD_CLEANUP + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#undef JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +#undef JEMALLOC_MUTEX_INIT_CB + +/* Non-empty if the tls_model attribute is supported. */ +#undef JEMALLOC_TLS_MODEL + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. 
+ */ +#undef JEMALLOC_DEBUG + +/* JEMALLOC_STATS enables statistics calculation. */ +#undef JEMALLOC_STATS + +/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */ +#undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API + +/* JEMALLOC_PROF enables allocation profiling. */ +#undef JEMALLOC_PROF + +/* Use libunwind for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBUNWIND + +/* Use libgcc for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBGCC + +/* Use gcc intrinsics for profile backtracing if defined. */ +#undef JEMALLOC_PROF_GCC + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#undef JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#undef JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +#undef JEMALLOC_UTRACE + +/* Support optional abort() on OOM. */ +#undef JEMALLOC_XMALLOC + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +#undef JEMALLOC_LAZY_LOCK + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#undef LG_QUANTUM + +/* One page is 2^LG_PAGE bytes. */ +#undef LG_PAGE + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#undef LG_HUGEPAGE + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#undef JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#undef JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#undef JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#undef JEMALLOC_INTERNAL_UNREACHABLE + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#undef JEMALLOC_INTERNAL_FFSLL +#undef JEMALLOC_INTERNAL_FFSL +#undef JEMALLOC_INTERNAL_FFS + +/* + * popcount*() functions to use for bitmapping. + */ +#undef JEMALLOC_INTERNAL_POPCOUNTL +#undef JEMALLOC_INTERNAL_POPCOUNT + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#undef JEMALLOC_CACHE_OBLIVIOUS + +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +#undef JEMALLOC_LOG + +/* + * If defined, use readlinkat() (instead of readlink()) to follow + * /etc/malloc_conf. + */ +#undef JEMALLOC_READLINKAT + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +#undef JEMALLOC_ZONE + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. 
+ */ +#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#undef JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#undef JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +#undef JEMALLOC_PURGE_MADVISE_FREE +#undef JEMALLOC_PURGE_MADVISE_DONTNEED +#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +#undef JEMALLOC_DEFINE_MADVISE_FREE + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#undef JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +#undef JEMALLOC_THP + +/* Define if operating system has alloca.h header. */ +#undef JEMALLOC_HAS_ALLOCA_H + +/* C99 restrict keyword supported. */ +#undef JEMALLOC_HAS_RESTRICT + +/* For use by hash code. */ +#undef JEMALLOC_BIG_ENDIAN + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#undef LG_SIZEOF_INT + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#undef LG_SIZEOF_LONG + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#undef LG_SIZEOF_LONG_LONG + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#undef LG_SIZEOF_INTMAX_T + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#undef JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#undef JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#undef JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#undef JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#undef JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#undef JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#undef JEMALLOC_BACKGROUND_THREAD + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +#undef JEMALLOC_EXPORT + +/* config.malloc_conf options string. */ +#undef JEMALLOC_CONFIG_MALLOC_CONF + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#undef JEMALLOC_IS_MALLOC + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. + */ +#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + +/* Performs additional safety checks when defined. 
*/ +#undef JEMALLOC_OPT_SAFETY_CHECKS + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h new file mode 100644 index 0000000..d291170 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTERNS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/tsd_types.h" + +/* TSD checks this to set thread local slow state accordingly. */ +extern bool malloc_slow; + +/* Run-time options. */ +extern bool opt_abort; +extern bool opt_abort_conf; +extern bool opt_confirm_conf; +extern const char *opt_junk; +extern bool opt_junk_alloc; +extern bool opt_junk_free; +extern bool opt_utrace; +extern bool opt_xmalloc; +extern bool opt_zero; +extern unsigned opt_narenas; + +/* Number of CPUs. */ +extern unsigned ncpus; + +/* Number of arenas used for automatic multiplexing of threads and arenas. */ +extern unsigned narenas_auto; + +/* Base index for manual arenas. */ +extern unsigned manual_arena_base; + +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern atomic_p_t arenas[]; + +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +void arena_set(unsigned ind, arena_t *arena); +unsigned narenas_total_get(void); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_tdata_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); +bool malloc_initialized(void); +void je_sdallocx_noflags(void *ptr, size_t size); + +#endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h new file mode 100644 index 0000000..437eaa4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_INCLUDES_H +#define JEMALLOC_INTERNAL_INCLUDES_H + +/* + * jemalloc can conceptually be broken into components (arena, tcache, etc.), + * but there are circular dependencies that cannot be broken without + * substantial performance degradation. + * + * Historically, we dealt with this by splitting each header into four sections (types, + * structs, externs, and inlines), and included each header file multiple times + * in this file, picking out the portion we want on each pass using the + * following #defines: + * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data + * types. + * JEMALLOC_H_STRUCTS : Data structures. + * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. + * JEMALLOC_H_INLINES : Inline functions.
+ * + * We're moving toward a world in which the dependencies are explicit; each file + * will #include the headers it depends on (rather than relying on them being + * implicitly available via this file including every header file in the + * project). + * + * We're now in an intermediate state: we've broken up the header files to avoid + * having to include each one multiple times, but have not yet moved the + * dependency information into the header files (i.e. we still rely on the + * ordering in this file to ensure all a header's dependencies are available in + * its translation unit). Each component is now broken up into multiple header + * files, corresponding to the sections above (e.g. instead of "foo.h", we now + * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h"). + * + * Those files which have been converted to explicitly include their + * inter-component dependencies are now in the initial HERMETIC HEADERS + * section. All headers may still rely on jemalloc_preamble.h (which, by fiat, + * must be included first in every translation unit) for system headers and + * global jemalloc definitions, however. + */ + +/******************************************************************************/ +/* TYPES */ +/******************************************************************************/ + +#include "jemalloc/internal/extent_types.h" +#include "jemalloc/internal/base_types.h" +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/prof_types.h" + +/******************************************************************************/ +/* STRUCTS */ +/******************************************************************************/ + +#include "jemalloc/internal/arena_structs_a.h" +#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/base_structs.h" +#include "jemalloc/internal/prof_structs.h" +#include "jemalloc/internal/arena_structs_b.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/background_thread_structs.h" + +/******************************************************************************/ +/* EXTERNS */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/extent_externs.h" +#include "jemalloc/internal/base_externs.h" +#include "jemalloc/internal/arena_externs.h" +#include "jemalloc/internal/large_externs.h" +#include "jemalloc/internal/tcache_externs.h" +#include "jemalloc/internal/prof_externs.h" +#include "jemalloc/internal/background_thread_externs.h" + +/******************************************************************************/ +/* INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_inlines_a.h" +#include "jemalloc/internal/base_inlines.h" +/* + * Include portions of arena code interleaved with tcache code in order to + * resolve circular dependencies. 
+ */ +#include "jemalloc/internal/prof_inlines_a.h" +#include "jemalloc/internal/arena_inlines_a.h" +#include "jemalloc/internal/extent_inlines.h" +#include "jemalloc/internal/jemalloc_internal_inlines_b.h" +#include "jemalloc/internal/tcache_inlines.h" +#include "jemalloc/internal/arena_inlines_b.h" +#include "jemalloc/internal/jemalloc_internal_inlines_c.h" +#include "jemalloc/internal/prof_inlines_b.h" +#include "jemalloc/internal/background_thread_inlines.h" + +#endif /* JEMALLOC_INTERNAL_INCLUDES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h new file mode 100644 index 0000000..ddde9b4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -0,0 +1,174 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_A_H +#define JEMALLOC_INTERNAL_INLINES_A_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/ticker.h" + +JEMALLOC_ALWAYS_INLINE malloc_cpuid_t +malloc_getcpu(void) { + assert(have_percpu_arena); +#if defined(_WIN32) + return GetCurrentProcessorNumber(); +#elif defined(JEMALLOC_HAVE_SCHED_GETCPU) + return (malloc_cpuid_t)sched_getcpu(); +#else + not_reached(); + return -1; +#endif +} + +/* Return the chosen arena index based on current cpu. */ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_choose(void) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); + + malloc_cpuid_t cpuid = malloc_getcpu(); + assert(cpuid >= 0); + + unsigned arena_ind; + if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / + 2)) { + arena_ind = cpuid; + } else { + assert(opt_percpu_arena == per_phycpu_arena); + /* Hyper threads on the same physical CPU share arena. */ + arena_ind = cpuid - ncpus / 2; + } + + return arena_ind; +} + +/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_ind_limit(percpu_arena_mode_t mode) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); + if (mode == per_phycpu_arena && ncpus > 1) { + if (ncpus % 2) { + /* This likely means a misconfig. */ + return ncpus / 2 + 1; + } + return ncpus / 2; + } else { + return ncpus; + } +} + +static inline arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return arena_tdata_get_hard(tsd, ind); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) { + return tdata; + } + return arena_tdata_get_hard(tsd, ind); +} + +static inline arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { + arena_t *ret; + + assert(ind < MALLOCX_ARENA_LIMIT); + + ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); + if (unlikely(ret == NULL)) { + if (init_if_missing) { + ret = arena_init(tsdn, ind, + (extent_hooks_t *)&extent_hooks_default); + } + } + return ret; +} + +static inline ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) { + return NULL; + } + return &tdata->decay_ticker; +} + +JEMALLOC_ALWAYS_INLINE cache_bin_t * +tcache_small_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind < SC_NBINS); + return &tcache->bins_small[binind]; +} + +JEMALLOC_ALWAYS_INLINE cache_bin_t * +tcache_large_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind >= SC_NBINS &&binind < nhbins); + return &tcache->bins_large[binind - SC_NBINS]; +} + +JEMALLOC_ALWAYS_INLINE bool +tcache_available(tsd_t *tsd) { + /* + * Thread specific auto tcache might be unavailable if: 1) during tcache + * initialization, or 2) disabled through thread.tcache.enabled mallctl + * or config options. This check covers all cases. + */ + if (likely(tsd_tcache_enabled_get(tsd))) { + /* Associated arena == NULL implies tcache init in progress. */ + assert(tsd_tcachep_get(tsd)->arena == NULL || + tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != + NULL); + return true; + } + + return false; +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd) { + if (!tcache_available(tsd)) { + return NULL; + } + + return tsd_tcachep_get(tsd); +} + +static inline void +pre_reentrancy(tsd_t *tsd, arena_t *arena) { + /* arena is the current context. Reentry from a0 is not allowed. */ + assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); + + bool fast = tsd_fast(tsd); + assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); + ++*tsd_reentrancy_levelp_get(tsd); + if (fast) { + /* Prepare slow path for reentrancy. */ + tsd_slow_update(tsd); + assert(tsd_state_get(tsd) == tsd_state_nominal_slow); + } +} + +static inline void +post_reentrancy(tsd_t *tsd) { + int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); + assert(*reentrancy_level > 0); + if (--*reentrancy_level == 0) { + tsd_slow_update(tsd); + } +} + +#endif /* JEMALLOC_INTERNAL_INLINES_A_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h new file mode 100644 index 0000000..70d6e57 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h @@ -0,0 +1,87 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_B_H +#define JEMALLOC_INTERNAL_INLINES_B_H + +#include "jemalloc/internal/rtree.h" + +/* Choose an arena based on a per-thread value. */ +static inline arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { + arena_t *ret; + + if (arena != NULL) { + return arena; + } + + /* During reentrancy, arena 0 is the safest bet. */ + if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) { + return arena_get(tsd_tsdn(tsd), 0, true); + } + + ret = internal ? 
tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) { + ret = arena_choose_hard(tsd, internal); + assert(ret); + if (tcache_available(tsd)) { + tcache_t *tcache = tcache_get(tsd); + if (tcache->arena != NULL) { + /* See comments in tcache_data_init().*/ + assert(tcache->arena == + arena_get(tsd_tsdn(tsd), 0, false)); + if (tcache->arena != ret) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tcache, ret); + } + } else { + tcache_arena_associate(tsd_tsdn(tsd), tcache, + ret); + } + } + } + + /* + * Note that for percpu arena, if the current arena is outside of the + * auto percpu arena range, (i.e. thread is assigned to a manually + * managed arena), then percpu arena is skipped. + */ + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && + !internal && (arena_ind_get(ret) < + percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != + tsd_tsdn(tsd))) { + unsigned ind = percpu_arena_choose(); + if (arena_ind_get(ret) != ind) { + percpu_arena_update(tsd, ind); + ret = tsd_arena_get(tsd); + } + ret->last_thd = tsd_tsdn(tsd); + } + + return ret; +} + +static inline arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, false); +} + +static inline arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, true); +} + +static inline bool +arena_is_auto(arena_t *arena) { + assert(narenas_auto > 0); + + return (arena_ind_get(arena) < manual_arena_base); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +iealloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); +} + +#endif /* JEMALLOC_INTERNAL_INLINES_B_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h new file mode 100644 index 0000000..0775b35 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -0,0 +1,273 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_C_H +#define JEMALLOC_INTERNAL_INLINES_C_H + +#include "jemalloc/internal/hook.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/witness.h" + +/* + * Translating the names of the 'i' functions: + * Abbreviations used in the first part of the function name (before + * alloc/dalloc) describe what that function accomplishes: + * a: arena (query) + * s: size (query, or sized deallocation) + * e: extent (query) + * p: aligned (allocates) + * vs: size (query, without knowing that the pointer is into the heap) + * r: rallocx implementation + * x: xallocx implementation + * Abbreviations used in the second part of the function name (after + * alloc/dalloc) describe the arguments it takes + * z: whether to return zeroed memory + * t: accepts a tcache_t * parameter + * m: accepts an arena_t * parameter + */ + +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_aalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_salloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_internal, arena_t *arena, bool slow_path) { + void *ret; + + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena 
== NULL || arena_is_auto(arena)); + if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + } + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { + return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, + NULL, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_internal, arena_t *arena) { + void *ret; + + assert(usize != 0); + assert(usize == sz_sa2u(usize, alignment)); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena == NULL || arena_is_auto(arena)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) { + return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { + return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd), false, NULL); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr) { + return arena_vsalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, + bool is_internal, bool slow_path) { + assert(ptr != NULL); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + if (config_stats && is_internal) { + arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); + } + if (!is_internal && !tsdn_null(tsdn) && + tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { + assert(tcache == NULL); + } + arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) { + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + hook_ralloc_args_t *hook_args) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + void *p; + size_t usize, copysize; + + usize = sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { + return NULL; + } + p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that 
the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(p, ptr, copysize); + hook_invoke_alloc(hook_args->is_realloc + ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p, + hook_args->args); + hook_invoke_dalloc(hook_args->is_realloc + ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return p; +} + +/* + * is_realloc threads through the knowledge of whether or not this call comes + * from je_realloc (as opposed to je_rallocx); this ensures that we pass the + * correct entry point into any hooks. + * Note that these functions are all force-inlined, so no actual bool gets + * passed-around anywhere. + */ +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args) +{ + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. + */ + return iralloct_realign(tsdn, ptr, oldsize, size, alignment, + zero, tcache, arena, hook_args); + } + + return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, + tcache, hook_args); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, hook_ralloc_args_t *hook_args) { + return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, + tcache_get(tsd), NULL, hook_args); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero, size_t *newsize) { + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. */ + *newsize = oldsize; + return true; + } + + return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero, + newsize); +} + +JEMALLOC_ALWAYS_INLINE int +iget_defrag_hint(tsdn_t *tsdn, void* ptr) { + int defrag = 0; + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind; + bool is_slab; + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab); + if (likely(is_slab)) { + /* Small allocation. 
*/ + extent_t *slab = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(slab); + szind_t binind = extent_szind_get(slab); + unsigned binshard = extent_binshard_get(slab); + bin_t *bin = &arena->bins[binind].bin_shards[binshard]; + malloc_mutex_lock(tsdn, &bin->lock); + /* Don't bother moving allocations from the slab currently used for new allocations */ + if (slab != bin->slabcur) { + int free_in_slab = extent_nfree_get(slab); + if (free_in_slab) { + const bin_info_t *bin_info = &bin_infos[binind]; + /* Find number of non-full slabs and the number of regs in them */ + unsigned long curslabs = 0; + size_t curregs = 0; + /* Run on all bin shards (usually just one) */ + for (uint32_t i=0; i< bin_info->n_shards; i++) { + bin_t *bb = &arena->bins[binind].bin_shards[i]; + curslabs += bb->stats.nonfull_slabs; + /* Deduct the regs in full slabs (they're not part of the game) */ + unsigned long full_slabs = bb->stats.curslabs - bb->stats.nonfull_slabs; + curregs += bb->stats.curregs - full_slabs * bin_info->nregs; + if (bb->slabcur) { + /* Remove slabcur from the overall utilization (not a candidate to move from) */ + curregs -= bin_info->nregs - extent_nfree_get(bb->slabcur); + curslabs -= 1; + } + } + /* Compare the utilization ratio of the slab in question to the total average + * among non-full slabs. To avoid precision loss in division, we do that by + * extrapolating the usage of the slab as if all slabs have the same usage. + * If this slab is less used than the average, we'll prefer to move the data + * to hopefully more used ones. To avoid stagnation when all slabs have the same + * utilization, we give additional 12.5% weight to the decision to defrag. */ + defrag = (bin_info->nregs - free_in_slab) * curslabs <= curregs + curregs / 8; + } + } + malloc_mutex_unlock(tsdn, &bin->lock); + } + return defrag; +} + +#endif /* JEMALLOC_INTERNAL_INLINES_C_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h new file mode 100644 index 0000000..d8ea06f --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,114 @@ +#ifndef JEMALLOC_INTERNAL_MACROS_H +#define JEMALLOC_INTERNAL_MACROS_H + +#ifdef JEMALLOC_DEBUG +# define JEMALLOC_ALWAYS_INLINE static inline +#else +# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline +#endif +#ifdef _MSC_VER +# define inline _inline +#endif + +#define UNUSED JEMALLOC_ATTR(unused) + +#define ZU(z) ((size_t)z) +#define ZD(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QD(q) ((int64_t)q) + +#define KZU(z) ZU(z##ULL) +#define KZD(z) ZD(z##LL) +#define KQU(q) QU(q##ULL) +#define KQD(q) QD(q##LL) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) +# define restrict +#endif + +/* Various function pointers are static and immutable except during testing. */ +#ifdef JEMALLOC_JET +# define JET_MUTABLE +#else +# define JET_MUTABLE const +#endif + +#define JEMALLOC_VA_ARGS_HEAD(head, ...) head +#define JEMALLOC_VA_ARGS_TAIL(head, ...)
__VA_ARGS__ + +#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \ + && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7) +#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough); +#else +#define JEMALLOC_FALLTHROUGH /* falls through */ +#endif + +/* Diagnostic suppression macros */ +#if defined(_MSC_VER) && !defined(__clang__) +# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push)) +# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop)) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W)) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +/* #pragma GCC diagnostic first appeared in gcc 4.6. */ +#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \ + (__GNUC_MINOR__ > 5)))) || defined(__clang__) +/* + * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang + * diagnostic suppression macros and should not be used anywhere else. + */ +# define JEMALLOC_PRAGMA__(X) _Pragma(#X) +# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push) +# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop) +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) \ + JEMALLOC_PRAGMA__(GCC diagnostic ignored W) + +/* + * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and + * all clang versions up to version 7 (currently trunk, unreleased). This macro + * suppresses the warning for the affected compiler versions only. + */ +# if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \ + defined(__clang__) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# endif + +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits") +# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter") +# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7) +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \ + JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=") +# else +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# endif +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \ + JEMALLOC_DIAGNOSTIC_PUSH \ + JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER +#else +# define JEMALLOC_DIAGNOSTIC_PUSH +# define JEMALLOC_DIAGNOSTIC_POP +# define JEMALLOC_DIAGNOSTIC_IGNORE(W) +# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS +# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS +# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN +# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS +#endif + +/* + * Disables spurious diagnostics for all headers. Since these headers are not + * included by users directly, it does not affect their diagnostic settings. 
+ */ +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + +#endif /* JEMALLOC_INTERNAL_MACROS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h new file mode 100644 index 0000000..e296c5a --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h @@ -0,0 +1,114 @@ +#ifndef JEMALLOC_INTERNAL_TYPES_H +#define JEMALLOC_INTERNAL_TYPES_H + +#include "jemalloc/internal/quantum.h" + +/* Page size index type. */ +typedef unsigned pszind_t; + +/* Size class index type. */ +typedef unsigned szind_t; + +/* Processor / core id type. */ +typedef int malloc_cpuid_t; + +/* + * Flags bits: + * + * a: arena + * t: tcache + * 0: unused + * z: zero + * n: alignment + * + * aaaaaaaa aaaatttt tttttttt 0znnnnnn + */ +#define MALLOCX_ARENA_BITS 12 +#define MALLOCX_TCACHE_BITS 12 +#define MALLOCX_LG_ALIGN_BITS 6 +#define MALLOCX_ARENA_SHIFT 20 +#define MALLOCX_TCACHE_SHIFT 8 +#define MALLOCX_ARENA_MASK \ + (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT) +/* NB: Arena index bias decreases the maximum number of arenas by 1. */ +#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) +#define MALLOCX_TCACHE_MASK \ + (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) +#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) +#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) +/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) +#define MALLOCX_ZERO_GET(flags) \ + ((bool)(flags & MALLOCX_ZERO)) + +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) + +/* Smallest size class to support. */ +#define TINY_MIN (1U << LG_TINY_MIN) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. 
*/ +#if __STDC_VERSION__ < 199901L +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef JEMALLOC_HAS_ALLOCA_H +# include +# else +# include +# endif +# endif +# define VARIABLE_ARRAY(type, name, count) \ + type *name = alloca(sizeof(type) * (count)) +#else +# define VARIABLE_ARRAY(type, name, count) type name[(count)] +#endif + +#endif /* JEMALLOC_INTERNAL_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in new file mode 100644 index 0000000..3418cbf --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in @@ -0,0 +1,213 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc@install_suffix@.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) @private_namespace@##n +# include "../jemalloc@install_suffix@.h" +#endif + +#if defined(JEMALLOC_OSATOMIC) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include +#include +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. + */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/test_hooks.h" + +#ifdef JEMALLOC_DEFINE_MADVISE_FREE +# define JEMALLOC_MADV_FREE 8 +#endif + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool have_madvise_huge = +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +/* + * Undocumented, for jemalloc development use only at the moment. See the note + * in jemalloc/internal/log.h. 
+ */ +static const bool config_log = +#ifdef JEMALLOC_LOG + true +#else + false +#endif + ; +/* + * Are extra safety checks enabled; things like checking the size of sized + * deallocations, double-frees, etc. + */ +static const bool config_opt_safety_checks = +#ifdef JEMALLOC_OPT_SAFETY_CHECKS + true +#elif defined(JEMALLOC_DEBUG) + /* + * This lets us only guard safety checks by one flag instead of two; fast + * checks can guard solely by config_opt_safety_checks and run in debug mode + * too. + */ + true +#else + false +#endif + ; + +#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU) +/* Currently percpu_arena depends on sched_getcpu. */ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. + */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/large_externs.h b/deps/jemalloc/include/jemalloc/internal/large_externs.h new file mode 100644 index 0000000..a05019e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/large_externs.h @@ -0,0 +1,32 @@ +#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H +#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H + +#include "jemalloc/internal/hook.h" + +void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero); +bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero); +void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, + size_t alignment, bool zero, tcache_t *tcache, + hook_ralloc_args_t *hook_args); + +typedef void (large_dalloc_junk_t)(void *, size_t); +extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; + +typedef void (large_dalloc_maybe_junk_t)(void *, size_t); +extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; + +void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); +void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); +void large_dalloc(tsdn_t *tsdn, extent_t *extent); +size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); +prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); +void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); +void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); + +nstime_t large_prof_alloc_time_get(const extent_t *extent); +void large_prof_alloc_time_set(extent_t *extent, nstime_t time); + +#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/log.h b/deps/jemalloc/include/jemalloc/internal/log.h new file mode 100644 index 0000000..6420858 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/log.h @@ -0,0 +1,115 @@ +#ifndef JEMALLOC_INTERNAL_LOG_H +#define JEMALLOC_INTERNAL_LOG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + +#ifdef JEMALLOC_LOG +# define JEMALLOC_LOG_VAR_BUFSIZE 1000 +#else +# define JEMALLOC_LOG_VAR_BUFSIZE 1 +#endif + +#define JEMALLOC_LOG_BUFSIZE 4096 + +/* + * The log malloc_conf option is a '|'-delimited list of log_var name segments + * 
which should be logged. The names are themselves hierarchical, with '.' as + * the delimiter (a "segment" is just a prefix in the log namespace). So, if + * you have: + * + * log("arena", "log msg for arena"); // 1 + * log("arena.a", "log msg for arena.a"); // 2 + * log("arena.b", "log msg for arena.b"); // 3 + * log("arena.a.a", "log msg for arena.a.a"); // 4 + * log("extent.a", "log msg for extent.a"); // 5 + * log("extent.b", "log msg for extent.b"); // 6 + * + * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and + * 6 will print at runtime. You can enable logging from all log vars by + * writing "log=.". + * + * None of this should be regarded as a stable API for right now. It's intended + * as a debugging interface, to let us keep around some of our printf-debugging + * statements. + */ + +extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +extern atomic_b_t log_init_done; + +typedef struct log_var_s log_var_t; +struct log_var_s { + /* + * Lowest bit is "inited", second lowest is "enabled". Putting them in + * a single word lets us avoid any fences on weak architectures. + */ + atomic_u_t state; + const char *name; +}; + +#define LOG_NOT_INITIALIZED 0U +#define LOG_INITIALIZED_NOT_ENABLED 1U +#define LOG_ENABLED 2U + +#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str} + +/* + * Returns the value we should assume for state (which is not necessarily + * accurate; if logging is done before logging has finished initializing, then + * we default to doing the safe thing by logging everything). + */ +unsigned log_var_update_state(log_var_t *log_var); + +/* We factor out the metadata management to allow us to test more easily. */ +#define log_do_begin(log_var) \ +if (config_log) { \ + unsigned log_state = atomic_load_u(&(log_var).state, \ + ATOMIC_RELAXED); \ + if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ + log_state = log_var_update_state(&(log_var)); \ + assert(log_state != LOG_NOT_INITIALIZED); \ + } \ + if (log_state == LOG_ENABLED) { \ + { + /* User code executes here. */ +#define log_do_end(log_var) \ + } \ + } \ +} + +/* + * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during + * preprocessing. To work around this, we take all potential extra arguments in + * a var-args functions. Since a varargs macro needs at least one argument in + * the "...", we accept the format string there, and require that the first + * argument in this "..." is a const char *. + */ +static inline void +log_impl_varargs(const char *name, ...) { + char buf[JEMALLOC_LOG_BUFSIZE]; + va_list ap; + + va_start(ap, name); + const char *format = va_arg(ap, const char *); + size_t dst_offset = 0; + dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name); + dst_offset += malloc_vsnprintf(buf + dst_offset, + JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); + dst_offset += malloc_snprintf(buf + dst_offset, + JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); + va_end(ap); + + malloc_write(buf); +} + +/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */ +#define LOG(log_var_str, ...) 
\ +do { \ + static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ + log_do_begin(log_var) \ + log_impl_varargs((log_var).name, __VA_ARGS__); \ + log_do_end(log_var) \ +} while (0) + +#endif /* JEMALLOC_INTERNAL_LOG_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/malloc_io.h b/deps/jemalloc/include/jemalloc/internal/malloc_io.h new file mode 100644 index 0000000..1d1a414 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/malloc_io.h @@ -0,0 +1,102 @@ +#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H +#define JEMALLOC_INTERNAL_MALLOC_IO_H + +#ifdef _WIN32 +# ifdef _WIN64 +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "ll" +# else +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "" +# endif +# define FMTd32 "d" +# define FMTu32 "u" +# define FMTx32 "x" +# define FMTd64 FMT64_PREFIX "d" +# define FMTu64 FMT64_PREFIX "u" +# define FMTx64 FMT64_PREFIX "x" +# define FMTdPTR FMTPTR_PREFIX "d" +# define FMTuPTR FMTPTR_PREFIX "u" +# define FMTxPTR FMTPTR_PREFIX "x" +#else +# include +# define FMTd32 PRId32 +# define FMTu32 PRIu32 +# define FMTx32 PRIx32 +# define FMTd64 PRId64 +# define FMTu64 PRIu64 +# define FMTx64 PRIx64 +# define FMTdPTR PRIdPTR +# define FMTuPTR PRIuPTR +# define FMTxPTR PRIxPTR +#endif + +/* Size of stack-allocated buffer passed to buferror(). */ +#define BUFERROR_BUF 64 + +/* + * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be + * large enough for all possible uses within jemalloc. + */ +#define MALLOC_PRINTF_BUFSIZE 4096 + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, + int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +size_t malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_FORMAT_PRINTF(3, 4); +/* + * The caller can set write_cb to null to choose to print with the + * je_malloc_message hook. + */ +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +static inline ssize_t +malloc_write_fd(int fd, const void *buf, size_t count) { +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) + /* + * Use syscall(2) rather than write(2) when possible in order to avoid + * the possibility of memory allocation within libc. This is necessary + * on FreeBSD; most operating systems do not have this problem though. + * + * syscall() returns long or int, depending on platform, so capture the + * result in the widest plausible type to avoid compiler warnings. 
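 * (Usage sketch for the LOG() macro defined above; the names and arguments
 *  are hypothetical: LOG("arena.new", "arena %u created with %zu bins",
 *  arena_ind, nbins); the line prints only when the "log" malloc_conf option
 *  selects a matching prefix, e.g. MALLOC_CONF="log=arena.new" or "log=."
 *  for everything.)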
+ */ + long result = syscall(SYS_write, fd, buf, count); +#else + ssize_t result = (ssize_t)write(fd, buf, +#ifdef _WIN32 + (unsigned int) +#endif + count); +#endif + return (ssize_t)result; +} + +static inline ssize_t +malloc_read_fd(int fd, void *buf, size_t count) { +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) + long result = syscall(SYS_read, fd, buf, count); +#else + ssize_t result = read(fd, buf, +#ifdef _WIN32 + (unsigned int) +#endif + count); +#endif + return (ssize_t)result; +} + +#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/mutex.h b/deps/jemalloc/include/jemalloc/internal/mutex.h new file mode 100644 index 0000000..7c24f07 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/mutex.h @@ -0,0 +1,288 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_H +#define JEMALLOC_INTERNAL_MUTEX_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" + +typedef enum { + /* Can only acquire one mutex of a given witness rank at a time. */ + malloc_mutex_rank_exclusive, + /* + * Can acquire multiple mutexes of the same witness rank, but in + * address-ascending order only. + */ + malloc_mutex_address_ordered +} malloc_mutex_lock_order_t; + +typedef struct malloc_mutex_s malloc_mutex_t; +struct malloc_mutex_s { + union { + struct { + /* + * prof_data is defined first to reduce cacheline + * bouncing: the data is not touched by the mutex holder + * during unlocking, while might be modified by + * contenders. Having it before the mutex itself could + * avoid prefetching a modified cacheline (for the + * unlocking thread). + */ + mutex_prof_data_t prof_data; +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif + /* + * Hint flag to avoid exclusive cache line contention + * during spin waiting + */ + atomic_b_t locked; + }; + /* + * We only touch witness when configured w/ debug. However we + * keep the field in a union when !debug so that we don't have + * to pollute the code base with #ifdefs, while avoid paying the + * memory cost. + */ +#if !defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif + }; + +#if defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif +}; + +/* + * Based on benchmark results, a fixed spin with this amount of retries works + * well for our critical sections. 
+ */ +#define MALLOC_MUTEX_MAX_SPIN 250 + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 +# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) +# else +# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) +#else +# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) +#endif + +#define LOCK_PROF_DATA_INITIALIZER \ + {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ + ATOMIC_INIT(0), 0, NULL, 0} + +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# if defined(JEMALLOC_DEBUG) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} +# else +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +# endif +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +# if (defined(JEMALLOC_DEBUG)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} +# else +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +# endif + +#else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# if defined(JEMALLOC_DEBUG) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0} +# else +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +# endif +#endif + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. 
*/ +# define isthreaded true +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); +void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); + +void malloc_mutex_lock_slow(malloc_mutex_t *mutex); + +static inline void +malloc_mutex_lock_final(malloc_mutex_t *mutex) { + MALLOC_MUTEX_LOCK(mutex); + atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); +} + +static inline bool +malloc_mutex_trylock_final(malloc_mutex_t *mutex) { + return MALLOC_MUTEX_TRYLOCK(mutex); +} + +static inline void +mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { + if (config_stats) { + mutex_prof_data_t *data = &mutex->prof_data; + data->n_lock_ops++; + if (data->prev_owner != tsdn) { + data->prev_owner = tsdn; + data->n_owner_switches++; + } + } +} + +/* Trylock: return false if the lock is successfully acquired. */ +static inline bool +malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + if (malloc_mutex_trylock_final(mutex)) { + atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); + return true; + } + mutex_owner_stats_update(tsdn, mutex); + } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + + return false; +} + +/* Aggregate lock prof data. */ +static inline void +malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { + nstime_add(&sum->tot_wait_time, &data->tot_wait_time); + if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { + nstime_copy(&sum->max_wait_time, &data->max_wait_time); + } + + sum->n_wait_times += data->n_wait_times; + sum->n_spin_acquired += data->n_spin_acquired; + + if (sum->max_n_thds < data->max_n_thds) { + sum->max_n_thds = data->max_n_thds; + } + uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, + ATOMIC_RELAXED); + uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( + &data->n_waiting_thds, ATOMIC_RELAXED); + atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, + ATOMIC_RELAXED); + sum->n_owner_switches += data->n_owner_switches; + sum->n_lock_ops += data->n_lock_ops; +} + +static inline void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + if (malloc_mutex_trylock_final(mutex)) { + malloc_mutex_lock_slow(mutex); + atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED); + } + mutex_owner_stats_update(tsdn, mutex); + } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED); + witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + MALLOC_MUTEX_UNLOCK(mutex); + } +} + +static inline void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +/* Copy the prof data from mutex for processing. 
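
A minimal sketch of the expected calling pattern for the mutex API declared above; the mutex, its name string, and the use of WITNESS_RANK_OMIT are illustrative, and error handling is omitted.

    static malloc_mutex_t demo_mtx;

    static bool
    demo_mtx_boot(void) {
        return malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT,
            malloc_mutex_rank_exclusive);
    }

    static void
    demo_mtx_op(tsdn_t *tsdn) {
        malloc_mutex_lock(tsdn, &demo_mtx);    /* trylock fast path, then slow path */
        /* ... mutate state protected by demo_mtx ... */
        malloc_mutex_unlock(tsdn, &demo_mtx);
    }
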
*/ +static inline void +malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + /* + * Not *really* allowed (we shouldn't be doing non-atomic loads of + * atomic data), but the mutex protection makes this safe, and writing + * a member-for-member copy is tedious for this situation. + */ + *data = *source; + /* n_wait_thds is not reported (modified w/o locking). */ + atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); +} + +static inline void +malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + nstime_add(&data->tot_wait_time, &source->tot_wait_time); + if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) { + nstime_copy(&data->max_wait_time, &source->max_wait_time); + } + data->n_wait_times += source->n_wait_times; + data->n_spin_acquired += source->n_spin_acquired; + if (data->max_n_thds < source->max_n_thds) { + data->max_n_thds = source->max_n_thds; + } + /* n_wait_thds is not reported. */ + atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); + data->n_owner_switches += source->n_owner_switches; + data->n_lock_ops += source->n_lock_ops; +} + +#endif /* JEMALLOC_INTERNAL_MUTEX_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/mutex_pool.h b/deps/jemalloc/include/jemalloc/internal/mutex_pool.h new file mode 100644 index 0000000..726cece --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/mutex_pool.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H +#define JEMALLOC_INTERNAL_MUTEX_POOL_H + +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/witness.h" + +/* We do mod reductions by this value, so it should be kept a power of 2. */ +#define MUTEX_POOL_SIZE 256 + +typedef struct mutex_pool_s mutex_pool_t; +struct mutex_pool_s { + malloc_mutex_t mutexes[MUTEX_POOL_SIZE]; +}; + +bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank); + +/* Internal helper - not meant to be called outside this module. */ +static inline malloc_mutex_t * +mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { + size_t hash_result[2]; + hash(&key, sizeof(key), 0xd50dcc1b, hash_result); + return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; +} + +static inline void +mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { + for (int i = 0; i < MUTEX_POOL_SIZE; i++) { + malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); + } +} + +/* + * Note that a mutex pool doesn't work exactly the way an embdedded mutex would. + * You're not allowed to acquire mutexes in the pool one at a time. You have to + * acquire all the mutexes you'll need in a single function call, and then + * release them all in a single function call. 
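
A minimal sketch of the all-at-once locking discipline this note describes, using the two-key helpers defined just below; the pool and keys are illustrative.

    static mutex_pool_t demo_pool;

    static void
    demo_pair_op(tsdn_t *tsdn, uintptr_t key_a, uintptr_t key_b) {
        /* Both pool mutexes are taken in one call, in address order (or only
         * once if the two keys hash to the same mutex). */
        mutex_pool_lock2(tsdn, &demo_pool, key_a, key_b);
        /* ... operate on the objects identified by key_a and key_b ... */
        mutex_pool_unlock2(tsdn, &demo_pool, key_a, key_b);
    }
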
+ */ + +static inline void +mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_lock(tsdn, mutex); +} + +static inline void +mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_unlock(tsdn, mutex); + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + malloc_mutex_lock(tsdn, mutex2); + } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + } else { + malloc_mutex_lock(tsdn, mutex2); + malloc_mutex_lock(tsdn, mutex1); + } +} + +static inline void +mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if (mutex1 == mutex2) { + malloc_mutex_unlock(tsdn, mutex1); + } else { + malloc_mutex_unlock(tsdn, mutex1); + malloc_mutex_unlock(tsdn, mutex2); + } + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); +} + +#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/mutex_prof.h b/deps/jemalloc/include/jemalloc/internal/mutex_prof.h new file mode 100644 index 0000000..2cb8fb0 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/mutex_prof.h @@ -0,0 +1,108 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H +#define JEMALLOC_INTERNAL_MUTEX_PROF_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/tsd_types.h" + +#define MUTEX_PROF_GLOBAL_MUTEXES \ + OP(background_thread) \ + OP(ctl) \ + OP(prof) + +typedef enum { +#define OP(mtx) global_prof_mutex_##mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + mutex_prof_num_global_mutexes +} mutex_prof_global_ind_t; + +#define MUTEX_PROF_ARENA_MUTEXES \ + OP(large) \ + OP(extent_avail) \ + OP(extents_dirty) \ + OP(extents_muzzy) \ + OP(extents_retained) \ + OP(decay_dirty) \ + OP(decay_muzzy) \ + OP(base) \ + OP(tcache_list) + +typedef enum { +#define OP(mtx) arena_prof_mutex_##mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP + mutex_prof_num_arena_mutexes +} mutex_prof_arena_ind_t; + +/* + * The forth parameter is a boolean value that is true for derived rate counters + * and false for real ones. 
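
For orientation, an approximate expansion of the MUTEX_PROF_ARENA_MUTEXES X-macro above once OP(mtx) is defined as arena_prof_mutex_##mtx (abbreviated here):

    typedef enum {
        arena_prof_mutex_large,
        arena_prof_mutex_extent_avail,
        arena_prof_mutex_extents_dirty,
        /* ... one enumerator per OP(...) entry ... */
        arena_prof_mutex_tcache_list,
        mutex_prof_num_arena_mutexes
    } mutex_prof_arena_ind_t;
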
+ */ +#define MUTEX_PROF_UINT64_COUNTERS \ + OP(num_ops, uint64_t, "n_lock_ops", false, num_ops) \ + OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops) \ + OP(num_wait, uint64_t, "n_waiting", false, num_wait) \ + OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait) \ + OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq) \ + OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq) \ + OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch) \ + OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch) \ + OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time) \ + OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time) \ + OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time) + +#define MUTEX_PROF_UINT32_COUNTERS \ + OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds) + +#define MUTEX_PROF_COUNTERS \ + MUTEX_PROF_UINT64_COUNTERS \ + MUTEX_PROF_UINT32_COUNTERS + +#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter, + +#define COUNTER_ENUM(counter_list, t) \ + typedef enum { \ + counter_list \ + mutex_prof_num_##t##_counters \ + } mutex_prof_##t##_counter_ind_t; + +COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t) +COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t) + +#undef COUNTER_ENUM +#undef OP + +typedef struct { + /* + * Counters touched on the slow path, i.e. when there is lock + * contention. We update them once we have the lock. + */ + /* Total time (in nano seconds) spent waiting on this mutex. */ + nstime_t tot_wait_time; + /* Max time (in nano seconds) spent on a single lock operation. */ + nstime_t max_wait_time; + /* # of times have to wait for this mutex (after spinning). */ + uint64_t n_wait_times; + /* # of times acquired the mutex through local spinning. */ + uint64_t n_spin_acquired; + /* Max # of threads waiting for the mutex at the same time. */ + uint32_t max_n_thds; + /* Current # of threads waiting on the lock. Atomic synced. */ + atomic_u32_t n_waiting_thds; + + /* + * Data touched on the fast path. These are modified right after we + * grab the lock, so it's placed closest to the end (i.e. right before + * the lock) so that we have a higher chance of them being on the same + * cacheline. + */ + /* # of times the mutex holder is different than the previous one. */ + uint64_t n_owner_switches; + /* Previous mutex holder, to facilitate n_owner_switches. */ + tsdn_t *prev_owner; + /* # of lock() operations in total. */ + uint64_t n_lock_ops; +} mutex_prof_data_t; + +#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/nstime.h b/deps/jemalloc/include/jemalloc/internal/nstime.h new file mode 100644 index 0000000..17c177c --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/nstime.h @@ -0,0 +1,34 @@ +#ifndef JEMALLOC_INTERNAL_NSTIME_H +#define JEMALLOC_INTERNAL_NSTIME_H + +/* Maximum supported number of seconds (~584 years). 
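 * (For orientation: nstime_t below stores the whole value as nanoseconds in a
 *  uint64_t, and UINT64_MAX / 10^9 is about 18446744073 whole seconds, i.e.
 *  roughly 584.5 years; NSTIME_SEC_MAX sits just under that ceiling.)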
*/ +#define NSTIME_SEC_MAX KQU(18446744072) +#define NSTIME_ZERO_INITIALIZER {0} + +typedef struct { + uint64_t ns; +} nstime_t; + +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +uint64_t nstime_ns(const nstime_t *time); +uint64_t nstime_sec(const nstime_t *time); +uint64_t nstime_msec(const nstime_t *time); +uint64_t nstime_nsec(const nstime_t *time); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_iadd(nstime_t *time, uint64_t addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_isubtract(nstime_t *time, uint64_t subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); +uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); + +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; + +typedef bool (nstime_update_t)(nstime_t *); +extern nstime_update_t *JET_MUTABLE nstime_update; + +#endif /* JEMALLOC_INTERNAL_NSTIME_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/pages.h b/deps/jemalloc/include/jemalloc/internal/pages.h new file mode 100644 index 0000000..7dae633 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/pages.h @@ -0,0 +1,88 @@ +#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H +#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Huge page size. LG_HUGEPAGE is determined by the configure script. */ +#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) +#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) +/* Return the huge page base address for the huge page containing address a. */ +#define HUGEPAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) +/* Return the smallest pagesize multiple that is >= s. */ +#define HUGEPAGE_CEILING(s) \ + (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) + +/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ +#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) +# define PAGES_CAN_PURGE_LAZY +#endif +/* + * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. + * + * The only supported way to hard-purge on Windows is to decommit and then + * re-commit, but doing so is racy, and if re-commit fails it's a pain to + * propagate the "poisoned" memory state. Since we typically decommit as the + * next step after purging on Windows anyway, there's no point in adding such + * complexity. + */ +#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ + defined(JEMALLOC_MAPS_COALESCE)) +# define PAGES_CAN_PURGE_FORCED +#endif + +static const bool pages_can_purge_lazy = +#ifdef PAGES_CAN_PURGE_LAZY + true +#else + false +#endif + ; +static const bool pages_can_purge_forced = +#ifdef PAGES_CAN_PURGE_FORCED + true +#else + false +#endif + ; + +typedef enum { + thp_mode_default = 0, /* Do not change hugepage settings. 
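 * (Worked example for the page macros above, assuming LG_PAGE == 12:
 *  PAGE == 4096, PAGE_MASK == 0xfff,
 *  PAGE_ADDR2BASE(0x7f0000001234) == (void *)0x7f0000001000,
 *  PAGE_CEILING(5000) == 8192 and PAGE_CEILING(4096) == 4096;
 *  the HUGEPAGE_* macros follow the same pattern with LG_HUGEPAGE.)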
*/ + thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ + thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ + + thp_mode_names_limit = 3, /* Used for option processing. */ + thp_mode_not_supported = 3 /* No THP support detected. */ +} thp_mode_t; + +#define THP_MODE_DEFAULT thp_mode_default +extern thp_mode_t opt_thp; +extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ +extern const char *thp_mode_names[]; + +void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); +void pages_unmap(void *addr, size_t size); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge_lazy(void *addr, size_t size); +bool pages_purge_forced(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); +bool pages_dontdump(void *addr, size_t size); +bool pages_dodump(void *addr, size_t size); +bool pages_boot(void); +void pages_set_thp_state (void *ptr, size_t size); + +#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ph.h b/deps/jemalloc/include/jemalloc/internal/ph.h new file mode 100644 index 0000000..84d6778 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/ph.h @@ -0,0 +1,391 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With auxiliary twopass list, described in a follow on paper. + * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. 
*/ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) { \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + } \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ + if (a_phn0 == NULL) { \ + r_phn = a_phn1; \ + } else if (a_phn1 == NULL) { \ + r_phn = a_phn0; \ + } else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. 
\ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) { \ + break; \ + } \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) { \ + r_phn = NULL; \ + } else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). + */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr a_type *a_prefix##any(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. 
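
A minimal sketch of instantiating the pairing heap generated below for a hypothetical node type, in the same style jemalloc uses for its extent heaps; demo_node_t, demo_node_cmp and the demo_heap_ prefix are illustrative, and <stdint.h> plus this header are assumed to be included.

    typedef struct demo_node_s demo_node_t;
    struct demo_node_s {
        uint64_t          key;
        phn(demo_node_t)  link;    /* intrusive heap linkage */
    };

    static int
    demo_node_cmp(const demo_node_t *a, const demo_node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    typedef ph(demo_node_t) demo_heap_t;
    ph_proto(static, demo_heap_, demo_heap_t, demo_node_t)
    ph_gen(static, demo_heap_, demo_heap_t, demo_node_t, link, demo_node_cmp)

    /* Usage: demo_heap_new(&heap); demo_heap_insert(&heap, n);
     * demo_node_t *min = demo_heap_remove_first(&heap); */
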
+ */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) { \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) { \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return ph->ph_root; \ +} \ +a_attr a_type * \ +a_prefix##any(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ + if (aux != NULL) { \ + return aux; \ + } \ + return ph->ph_root; \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) { \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). \ + */ \ + if (ph->ph_root == NULL) { \ + ph->ph_root = phn; \ + } else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) { \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##remove_any(a_ph_type *ph) { \ + /* \ + * Remove the most recently inserted aux list element, or the \ + * root if the aux list is empty. This has the effect of \ + * behaving as a LIFO (and insertion/removal is therefore \ + * constant-time) if a_prefix##[remove_]first() are never \ + * called. \ + */ \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ + if (ret != NULL) { \ + a_type *aux = phn_next_get(a_type, a_field, ret); \ + phn_next_set(a_type, a_field, ph->ph_root, aux); \ + if (aux != NULL) { \ + phn_prev_set(a_type, a_field, aux, \ + ph->ph_root); \ + } \ + return ret; \ + } \ + ret = ph->ph_root; \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + return ret; \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) { \ + a_type *replace, *parent; \ + \ + if (ph->ph_root == phn) { \ + /* \ + * We can delete from aux list without merging it, but \ + * we need to merge if we are dealing with the root \ + * node and it has children. \ + */ \ + if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ + ph->ph_root = phn_next_get(a_type, a_field, \ + phn); \ + if (ph->ph_root != NULL) { \ + phn_prev_set(a_type, a_field, \ + ph->ph_root, NULL); \ + } \ + return; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. 
*/ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) { \ + parent = NULL; \ + } \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. */ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */ diff --git a/deps/jemalloc/include/jemalloc/internal/private_namespace.sh b/deps/jemalloc/include/jemalloc/internal/private_namespace.sh new file mode 100755 index 0000000..6ef1346 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/private_namespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat "$@"` ; do + echo "#define ${symbol} JEMALLOC_N(${symbol})" +done diff --git a/deps/jemalloc/include/jemalloc/internal/private_symbols.sh b/deps/jemalloc/include/jemalloc/internal/private_symbols.sh new file mode 100755 index 0000000..442a259 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/private_symbols.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# +# Generate private_symbols[_jet].awk. +# +# Usage: private_symbols.sh * +# +# is typically "" or "_". + +sym_prefix=$1 +shift + +cat <' output. +# +# Handle lines like: +# 0000000000000008 D opt_junk +# 0000000000007574 T malloc_initialized +(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) { + print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix)) +} + +# Process 'dumpbin /SYMBOLS ' output. 
+# +# Handle lines like: +# 353 00008098 SECT4 notype External | opt_junk +# 3F1 00000000 SECT7 notype () External | malloc_initialized +($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) { + print $NF +} +EOF diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h new file mode 100644 index 0000000..15cc2d1 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prng.h @@ -0,0 +1,185 @@ +#ifndef JEMALLOC_INTERNAL_PRNG_H +#define JEMALLOC_INTERNAL_PRNG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(y) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. For example, the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) { + return (state * PRNG_A_32) + PRNG_C_32; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) { + return (state * PRNG_A_64) + PRNG_C_64; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) { +#if LG_SIZEOF_PTR == 2 + return (state * PRNG_A_32) + PRNG_C_32; +#elif LG_SIZEOF_PTR == 3 + return (state * PRNG_A_64) + PRNG_C_64; +#else +#error Unsupported pointer size +#endif +} + +/******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* + * The prng_lg_range functions give a uniform int in the half-open range [0, + * 2**lg_range). If atomic is true, they do so safely from multiple threads. + * Multithreaded 64-bit prngs aren't supported. 
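
A standalone restatement (not part of the patch) of the 64-bit step described above, showing why results are taken from the upper bits; the constants mirror PRNG_A_64/PRNG_C_64 and the demo_* names are local to this sketch.

    #include <stdint.h>

    #define DEMO_PRNG_A UINT64_C(6364136223846793005)
    #define DEMO_PRNG_C UINT64_C(1442695040888963407)

    static uint64_t
    demo_state_next(uint64_t state) {
        return state * DEMO_PRNG_A + DEMO_PRNG_C;    /* (a*x + c) mod 2^64 */
    }

    static uint64_t
    demo_lg_range(uint64_t *state, unsigned lg_range) {
        *state = demo_state_next(*state);
        /* The low bits cycle quickly, so keep only the top lg_range bits. */
        return *state >> (64 - lg_range);
    }
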
+ */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) { + uint32_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + state0 = atomic_load_u32(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_u32(state0); + } while (!atomic_compare_exchange_weak_u32(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_u32(state0); + atomic_store_u32(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> (32 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) { + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { + size_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + state0 = atomic_load_zu(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_zu(state0); + } while (atomic_compare_exchange_weak_zu(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_zu(state0); + atomic_store_zu(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return ret; +} + +/* + * The prng_range functions behave like the prng_lg_range, but return a result + * in [0, range) instead of [0, 2**lg_range). + */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) { + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_PRNG_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/prof_externs.h b/deps/jemalloc/include/jemalloc/internal/prof_externs.h new file mode 100644 index 0000000..094f3e1 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prof_externs.h @@ -0,0 +1,105 @@ +#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H +#define JEMALLOC_INTERNAL_PROF_EXTERNS_H + +#include "jemalloc/internal/mutex.h" + +extern malloc_mutex_t bt2gctx_mtx; + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). 
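
For illustration: each opt_* variable above is populated from the malloc configuration string, where the key is the variable name without the opt_ prefix; an illustrative setting (values arbitrary) might be:

    MALLOC_CONF="prof:true,lg_prof_sample:19,lg_prof_interval:30"
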
*/ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern bool opt_prof_log; /* Turn logging on at boot. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. + */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +#endif +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *JET_MUTABLE prof_dump_open; + +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); +extern prof_dump_header_t *JET_MUTABLE prof_dump_header; +#ifdef JEMALLOC_JET +void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes); +#endif +bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +bool prof_log_start(tsdn_t *tsdn, const char *filename); +bool prof_log_stop(tsdn_t *tsdn); +#ifdef JEMALLOC_JET +size_t prof_log_bt_count(void); +size_t prof_log_alloc_count(void); +size_t prof_log_thr_count(void); +bool prof_log_is_logging(void); +bool prof_log_rep_check(void); +void prof_log_dummy_set(bool new_value); +#endif + +#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff 
--git a/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h new file mode 100644 index 0000000..471d985 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h @@ -0,0 +1,85 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H +#define JEMALLOC_INTERNAL_PROF_INLINES_A_H + +#include "jemalloc/internal/mutex.h" + +static inline bool +prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, + uint64_t accumbytes) { + cassert(config_prof); + + bool overflow; + uint64_t a0, a1; + + /* + * If the application allocates fast enough (and/or if idump is slow + * enough), extreme overflow here (a1 >= prof_interval * 2) can cause + * idump trigger coalescing. This is an intentional mechanism that + * avoids rate-limiting allocation. + */ +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = a0 + accumbytes; + assert(a1 >= a0); + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = a0 + accumbytes; + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif + return overflow; +} + +static inline void +prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, + size_t usize) { + cassert(config_prof); + + /* + * Cancel out as much of the excessive prof_accumbytes increase as + * possible without underflowing. Interval-triggered dumps occur + * slightly more often than intended as a result of incomplete + * canceling. + */ + uint64_t a0, a1; +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = (a0 >= SC_LARGE_MINCLASS - usize) + ? a0 - (SC_LARGE_MINCLASS - usize) : 0; + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = (a0 >= SC_LARGE_MINCLASS - usize) + ? a0 - (SC_LARGE_MINCLASS - usize) : 0; + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif +} + +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) { + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. + */ + return prof_active; +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h new file mode 100644 index 0000000..8ba8a1e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h @@ -0,0 +1,250 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H +#define JEMALLOC_INTERNAL_PROF_INLINES_B_H + +#include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) { + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. 
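 * (Worked example, illustrative numbers, of the accumulate-and-wrap logic in
 *  prof_accum_add() in prof_inlines_a.h above: with prof_interval == 2^20 and
 *  accumbytes == 1040000, adding 70000 gives a1 == 1110000 >= 2^20, so the
 *  caller triggers prof_idump() and the counter wraps to
 *  1110000 % 1048576 == 61424.)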
+ */ + return prof_gdump_val; +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return tdata; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsdn, ptr, tctx); +} + +JEMALLOC_ALWAYS_INLINE nstime_t +prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx, + nstime_t t) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_check(tsd_t *tsd, size_t usize, bool update) { + ssize_t check = update ? 0 : usize; + + int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); + if (update) { + bytes_until_sample -= usize; + if (tsd_nominal(tsd)) { + tsd_bytes_until_sample_set(tsd, bytes_until_sample); + } + } + if (likely(bytes_until_sample >= check)) { + return true; + } + + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) { + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Fastpath: no need to load tdata */ + if (likely(prof_sample_check(tsd, usize, update))) { + return true; + } + + bool booted = tsd_prof_tdata_get(tsd); + tdata = prof_tdata_get(tsd, true); + if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { + tdata = NULL; + } + + if (tdata_out != NULL) { + *tdata_out = tdata; + } + + if (unlikely(tdata == NULL)) { + return true; + } + + /* + * If this was the first creation of tdata, then + * prof_tdata_get() reset bytes_until_sample, so decrement and + * check it again + */ + if (!booted && prof_sample_check(tsd, usize, update)) { + return true; + } + + if (tsd_reentrancy_level_get(tsd) > 0) { + return true; + } + /* Compute new sample threshold. 
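 * (Illustrative numbers: with opt_lg_prof_sample == 19 the mean distance
 *  between sampled allocations is 2^19 == 524288 bytes; once the thread's
 *  bytes_until_sample budget is exhausted in prof_sample_check() above,
 *  prof_sample_threshold_update() below draws the next threshold.)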
*/ + if (update) { + prof_sample_threshold_update(tdata); + } + return !tdata->active; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == sz_s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) { + ret = (prof_tctx_t *)(uintptr_t)1U; + } else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, + prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsdn, ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_malloc_sample_object(tsdn, ptr, usize, tctx); + } else { + prof_tctx_set(tsdn, ptr, usize, alloc_ctx, + (prof_tctx_t *)(uintptr_t)1U); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) { + bool sampled, old_sampled, moved; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. + */ + prof_alloc_rollback(tsd, tctx, true); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + moved = (ptr != old_ptr); + + if (unlikely(sampled)) { + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); + } else if (moved) { + prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, + (prof_tctx_t *)(uintptr_t)1U); + } else if (unlikely(old_sampled)) { + /* + * prof_tctx_set() would work for the !moved case as well, but + * prof_tctx_reset() is slightly cheaper, and the proper thing + * to do here in the presence of explicit knowledge re: moved + * state. + */ + prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); + } else { + assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == + (uintptr_t)1U); + } + + /* + * The prof_free_sampled_object() call must come after the + * prof_malloc_sample_object() call, because tctx and old_tctx may be + * the same, in which case reversing the call order could cause the tctx + * to be prematurely destroyed as a side effect of momentarily zeroed + * counters. 
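+	 * (tctx == old_tctx is possible because prof_lookup() returns the
+	 * existing tctx when the same thread reallocates at the same
+	 * backtrace.)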
+ */ + if (unlikely(old_sampled)) { + prof_free_sampled_object(tsd, ptr, old_usize, old_tctx); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_free_sampled_object(tsd, ptr, usize, tctx); + } +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/prof_structs.h b/deps/jemalloc/include/jemalloc/internal/prof_structs.h new file mode 100644 index 0000000..34ed482 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prof_structs.h @@ -0,0 +1,200 @@ +#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H +#define JEMALLOC_INTERNAL_PROF_STRUCTS_H + +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/rb.h" + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_accum_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; + uint64_t accumbytes; +#else + atomic_u64_t accumbytes; +#endif +}; + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. + * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. 
+ * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. + */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. No locking is + * necessary when incrementing this field, because only one thread ever + * does so. + */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/prof_types.h b/deps/jemalloc/include/jemalloc/internal/prof_types.h new file mode 100644 index 0000000..1eff995 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/prof_types.h @@ -0,0 +1,56 @@ +#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H +#define JEMALLOC_INTERNAL_PROF_TYPES_H + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_accum_s prof_accum_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. 
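+ * This is the minitems value passed to ckh_new() when each tdata's bt2tctx
+ * backtrace-->tctx table is created.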
*/ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/public_namespace.sh b/deps/jemalloc/include/jemalloc/internal/public_namespace.sh new file mode 100755 index 0000000..4d415ba --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/public_namespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#define je_${n} JEMALLOC_N(${n})" +done diff --git a/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh new file mode 100755 index 0000000..4239d17 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#undef je_${n}" +done diff --git a/deps/jemalloc/include/jemalloc/internal/ql.h b/deps/jemalloc/include/jemalloc/internal/ql.h new file mode 100644 index 0000000..8029040 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/ql.h @@ -0,0 +1,88 @@ +#ifndef JEMALLOC_INTERNAL_QL_H +#define JEMALLOC_INTERNAL_QL_H + +#include "jemalloc/internal/qr.h" + +/* List definitions. */ +#define ql_head(a_type) \ +struct { \ + a_type *qlh_first; \ +} + +#define ql_head_initializer(a_head) {NULL} + +#define ql_elm(a_type) qr(a_type) + +/* List functions. */ +#define ql_new(a_head) do { \ + (a_head)->qlh_first = NULL; \ +} while (0) + +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) + +#define ql_first(a_head) ((a_head)->qlh_first) + +#define ql_last(a_head, a_field) \ + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ + : NULL) + +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + +#define ql_after_insert(a_qlelm, a_elm, a_field) \ + qr_after_insert((a_qlelm), (a_elm), a_field) + +#define ql_head_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + +#define ql_tail_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + +#define ql_remove(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + +#define ql_head_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_tail_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach((a_var), ql_first(a_head), a_field) + +#define ql_reverse_foreach(a_var, a_head, a_field) \ + qr_reverse_foreach((a_var), ql_first(a_head), a_field) + +#endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/qr.h b/deps/jemalloc/include/jemalloc/internal/qr.h new file mode 100644 index 0000000..1e1056b --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/qr.h @@ -0,0 +1,72 @@ +#ifndef JEMALLOC_INTERNAL_QR_H +#define JEMALLOC_INTERNAL_QR_H + +/* Ring definitions. */ +#define qr(a_type) \ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. */ +#define qr_new(a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) + +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) + +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_after_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ +} while (0) + +#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ + a_type *t; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. 
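+ * Splicing two disjoint rings together and splitting one ring at two of its
+ * elements both reduce to swapping the same pair of prev/next links, so a
+ * single implementation serves both operations.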
+ */ +#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_type, a_field) + +#define qr_remove(a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev->a_field.qre_next \ + = (a_qr)->a_field.qre_next; \ + (a_qr)->a_field.qre_next->a_field.qre_prev \ + = (a_qr)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_foreach(var, a_qr, a_field) \ + for ((var) = (a_qr); \ + (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next : NULL)) + +#define qr_reverse_foreach(var, a_qr, a_field) \ + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) \ + ? (var)->a_field.qre_prev : NULL)) + +#endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/quantum.h b/deps/jemalloc/include/jemalloc/internal/quantum.h new file mode 100644 index 0000000..821086e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/quantum.h @@ -0,0 +1,77 @@ +#ifndef JEMALLOC_INTERNAL_QUANTUM_H +#define JEMALLOC_INTERNAL_QUANTUM_H + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __m68k__ +# define LG_QUANTUM 3 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __nios2__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# if defined(__riscv) || defined(__riscv__) +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ + defined(__SH4_SINGLE_ONLY__)) +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#endif /* JEMALLOC_INTERNAL_QUANTUM_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h new file mode 100644 index 0000000..47fa5ca --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/rb.h @@ -0,0 +1,1006 @@ +/*- + ******************************************************************************* + * + * cpp macro implementation of left-leaning 2-3 red-black trees. Parent + * pointers are not used, and color bits are stored in the least significant + * bit of right-child pointers (if RB_COMPACT is defined), thus making node + * linkage as compact as is possible for red-black trees. + * + * Usage: + * + * #include + * #include + * #define NDEBUG // (Optional, see assert(3).) 
+ * #include + * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) + * #include + * ... + * + ******************************************************************************* + */ + +#ifndef RB_H_ +#define RB_H_ + +#ifndef __PGI +#define RB_COMPACT +#endif + +#ifdef RB_COMPACT +/* Node structure. */ +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ +#define rb_tree(a_type) \ +struct { \ + a_type *rbt_root; \ +} + +/* Left accessors. */ +#define rbtn_left_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_left) +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#else +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_right) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_red) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = true; \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = false; \ +} while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) +#endif + +/* Tree initializer. */ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ +} while (0) + +/* Internal utility macros. 
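+ * rbtn_first()/rbtn_last() walk to the leftmost/rightmost node of a subtree;
+ * rbtn_rotate_left()/rbtn_rotate_right() are the single rotations used during
+ * rebalancing.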
*/ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; \ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). + */ + +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparison function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. 
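+ *
+ *   For illustration, assuming ex_node_s additionally carried an integer
+ *   key member, a suitable comparison function could be written as:
+ *
+ *     static int
+ *     ex_cmp(ex_node_t *a, ex_node_t *b) {
+ *         return (a->key > b->key) - (a->key < b->key);
+ *     }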
+ * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. + * + * static ex_node_t * + * ex_search(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * + * ex_nsearch(ex_t *tree, const ex_node_t *key); + * static ex_node_t * + * ex_psearch(ex_t *tree, const ex_node_t *key); + * Description: Search for node that matches key. If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. 
This is useful for re-starting iteration after + * modifying tree. + * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). + */ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != NULL); \ + ret = NULL; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != NULL); \ + } \ + } \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ + while (ret != NULL \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return ret; \ +} \ +a_attr a_type * 
\ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = NULL; \ + while (tnode != NULL) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + return ret; \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. */ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != NULL; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ + for (pathp++; pathp->node != NULL; pathp++) { \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. 
*/ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. */\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. */\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. */\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + if (left != NULL) { \ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ + assert(!rbtn_red_get(a_type, a_field, node)); \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ + rbtree->rbt_root = NULL; \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ + pathp->node = NULL; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ + assert(leftright != NULL); \ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. 
*/ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. */\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. 
*/ \ + rbtree->rbt_root = path->node; \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return NULL; \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ + } else if (cmp > 0) { \ + return a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return NULL; \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ + } else if (cmp < 0) { \ + return a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ + } \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + 
cb, arg); \ + } \ + return ret; \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ + } \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ +} + +#endif /* RB_H_ */ diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h new file mode 100644 index 0000000..16ccbeb --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/rtree.h @@ -0,0 +1,528 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_H +#define JEMALLOC_INTERNAL_RTREE_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/tsd.h" + +/* + * This radix tree implementation is tailored to the singular purpose of + * associating metadata with extents that are currently owned by jemalloc. + * + ******************************************************************************* + */ + +/* Number of high insignificant bits. */ +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +/* Number of low insigificant bits. */ +#define RTREE_NLIB LG_PAGE +/* Number of significant bits. */ +#define RTREE_NSB (LG_VADDR - RTREE_NLIB) +/* Number of levels in radix tree. */ +#if RTREE_NSB <= 10 +# define RTREE_HEIGHT 1 +#elif RTREE_NSB <= 36 +# define RTREE_HEIGHT 2 +#elif RTREE_NSB <= 52 +# define RTREE_HEIGHT 3 +#else +# error Unsupported number of significant virtual address bits +#endif +/* Use compact leaf representation if virtual address encoding allows. */ +#if RTREE_NHIB >= LG_CEIL(SC_NSIZES) +# define RTREE_LEAF_COMPACT +#endif + +/* Needed for initialization only. */ +#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) + +typedef struct rtree_node_elm_s rtree_node_elm_t; +struct rtree_node_elm_s { + atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ +}; + +struct rtree_leaf_elm_s { +#ifdef RTREE_LEAF_COMPACT + /* + * Single pointer-width field containing all three leaf element fields. + * For example, on a 64-bit x64 system with 48 significant virtual + * memory address bits, the index, extent, and slab fields are packed as + * such: + * + * x: index + * e: extent + * b: slab + * + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b + */ + atomic_p_t le_bits; +#else + atomic_p_t le_extent; /* (extent_t *) */ + atomic_u_t le_szind; /* (szind_t) */ + atomic_b_t le_slab; /* (bool) */ +#endif +}; + +typedef struct rtree_level_s rtree_level_t; +struct rtree_level_s { + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; + +typedef struct rtree_s rtree_t; +struct rtree_s { + malloc_mutex_t init_lock; + /* Number of elements based on rtree_levels[0].bits. 
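+	 * For example, with 48 significant virtual address bits and 4 KiB
+	 * pages, RTREE_NSB is 36, RTREE_HEIGHT is 2, and the root holds 2^18
+	 * elements.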
*/ +#if RTREE_HEIGHT > 1 + rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#else + rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#endif +}; + +/* + * Split the bits into one to three partitions depending on number of + * significant bits. It the number of bits does not divide evenly into the + * number of levels, place one remainder bit per level starting at the leaf + * level. + */ +static const rtree_level_t rtree_levels[] = { +#if RTREE_HEIGHT == 1 + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 2 + {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, + {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 3 + {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, + {RTREE_NSB/3 + RTREE_NSB%3/2, + RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, + {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} +#else +# error Unsupported rtree height +#endif +}; + +bool rtree_new(rtree_t *rtree, bool zeroed); + +typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; + +typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; + +typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); +extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; + +typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); +extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; +#ifdef JEMALLOC_JET +void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); +#endif +rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, + rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leafkey(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + uintptr_t mask = ~((ZU(1) << maskbits) - 1); + return (key & mask); +} + +JEMALLOC_ALWAYS_INLINE size_t +rtree_cache_direct_map(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(uintptr_t key, unsigned level) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = rtree_levels[level].cumbits; + unsigned shiftbits = ptrbits - cumbits; + unsigned maskbits = rtree_levels[level].bits; + uintptr_t mask = (ZU(1) << maskbits) - 1; + return ((key >> shiftbits) & mask); +} + +/* + * Atomic getters. + * + * dependent: Reading a value on behalf of a pointer to a valid allocation + * is guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ +# ifdef RTREE_LEAF_COMPACT +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { + return (uintptr_t)atomic_load_p(&elm->le_bits, dependent + ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_bits_extent_get(uintptr_t bits) { +# ifdef __aarch64__ + /* + * aarch64 doesn't sign extend the highest virtual address bit to set + * the higher ones. Instead, the high bits gets zeroed. + */ + uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; + /* Mask off the slab bit. */ + uintptr_t low_bit_mask = ~(uintptr_t)1; + uintptr_t mask = high_bit_mask & low_bit_mask; + return (extent_t *)(bits & mask); +# else + /* Restore sign-extended high bits, mask slab bit. */ + return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> + RTREE_NHIB) & ~((uintptr_t)0x1)); +# endif +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_bits_szind_get(uintptr_t bits) { + return (szind_t)(bits >> LG_VADDR); +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_bits_slab_get(uintptr_t bits) { + return (bool)(bits & (uintptr_t)0x1); +} + +# endif + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_extent_get(bits); +#else + extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + return extent; +#endif +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_szind_get(bits); +#else + return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED + : ATOMIC_ACQUIRE); +#endif +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_slab_get(bits); +#else + return atomic_load_b(&elm->le_slab, dependent ? 
ATOMIC_RELAXED : + ATOMIC_ACQUIRE); +#endif +} + +static inline void +rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, extent_t *extent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) + | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind) { + assert(szind <= SC_NSIZES); + +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); + /* + * Write extent last, since the element is atomically considered valid + * as soon as the extent field is non-NULL. + */ + rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); +#endif +} + +static inline void +rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind, bool slab) { + assert(!slab || szind < SC_NBINS); + + /* + * The caller implicitly assures that it is the only writer to the szind + * and slab fields, and that the extent field cannot currently change. + */ + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + assert(key != 0); + assert(!dependent || !init_missing); + + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); + + /* Fast path: L1 direct mapped cache. 
*/ + if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + return &leaf[subkey]; + } + /* + * Search the L2 LRU cache. On hit, swap the matching element into the + * slot in L1 cache, and move the position in L2 up by 1. + */ +#define RTREE_CACHE_CHECK_L2(i) do { \ + if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ + rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ + assert(leaf != NULL); \ + if (i > 0) { \ + /* Bubble up by one. */ \ + rtree_ctx->l2_cache[i].leafkey = \ + rtree_ctx->l2_cache[i - 1].leafkey; \ + rtree_ctx->l2_cache[i].leaf = \ + rtree_ctx->l2_cache[i - 1].leaf; \ + rtree_ctx->l2_cache[i - 1].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[i - 1].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } else { \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ + return &leaf[subkey]; \ + } \ +} while (0) + /* Check the first cache entry. */ + RTREE_CACHE_CHECK_L2(0); + /* Search the remaining cache elements. */ + for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { + RTREE_CACHE_CHECK_L2(i); + } +#undef RTREE_CACHE_CHECK_L2 + + return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, + dependent, init_missing); +} + +static inline bool +rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + extent_t *extent, szind_t szind, bool slab) { + /* Use rtree_clear() to set the extent to NULL. */ + assert(extent != NULL); + + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, false, true); + if (elm == NULL) { + return true; + } + + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); + + return false; +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + bool dependent) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, dependent, false); + if (!dependent && elm == NULL) { + return NULL; + } + assert(elm != NULL); + return elm; +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NULL; + } + return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return SC_NSIZES; + } + return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); +} + +/* + * rtree_slab_read() is intentionally omitted because slab is always read in + * conjunction with szind, which makes rtree_szind_slab_read() a better choice. 
+ */ + +JEMALLOC_ALWAYS_INLINE bool +rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + return false; +} + +/* + * Try to read szind_slab from the L1 cache. Returns true on a hit, + * and fills in r_szind and r_slab. Otherwise returns false. + * + * Key is allowed to be NULL in order to save an extra branch on the + * fastpath. returns false in this case. + */ +JEMALLOC_ALWAYS_INLINE bool +rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, szind_t *r_szind, bool *r_slab) { + rtree_leaf_elm_t *elm; + + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); + + if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + elm = &leaf[subkey]; + +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, + elm, true); + *r_szind = rtree_leaf_elm_bits_szind_get(bits); + *r_slab = rtree_leaf_elm_bits_slab_get(bits); +#else + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true); + *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true); +#endif + return true; + } else { + return false; + } +} +JEMALLOC_ALWAYS_INLINE bool +rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_bits_szind_get(bits); + *r_slab = rtree_leaf_elm_bits_slab_get(bits); +#else + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); +#endif + return false; +} + +static inline void +rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, szind_t szind, bool slab) { + assert(!slab || szind < SC_NBINS); + + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); +} + +static inline void +rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != + NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false); +} + +#endif /* JEMALLOC_INTERNAL_RTREE_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h b/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h new file mode 100644 index 0000000..562e292 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h @@ -0,0 +1,50 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H +#define JEMALLOC_INTERNAL_RTREE_CTX_H + +/* + * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. 
Each + * entry supports an entire leaf, so the cache hit rate is typically high even + * with a small number of entries. In rare cases extent activity will straddle + * the boundary between two leaf nodes. Furthermore, an arena may use a + * combination of dss and mmap. Note that as memory usage grows past the amount + * that this cache can directly cover, the cache will become less effective if + * locality of reference is low, but the consequence is merely cache misses + * while traversing the tree nodes. + * + * The L1 direct mapped cache offers consistent and low cost on cache hit. + * However collision could affect hit rate negatively. This is resolved by + * combining with a L2 LRU cache, which requires linear search and re-ordering + * on access but suffers no collision. Note that, the cache will itself suffer + * cache misses if made overly large, plus the cost of linear search in the LRU + * cache. + */ +#define RTREE_CTX_LG_NCACHE 4 +#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) +#define RTREE_CTX_NCACHE_L2 8 + +/* + * Zero initializer required for tsd initialization only. Proper initialization + * done via rtree_ctx_data_init(). + */ +#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}} + + +typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; + +typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; +struct rtree_ctx_cache_elm_s { + uintptr_t leafkey; + rtree_leaf_elm_t *leaf; +}; + +typedef struct rtree_ctx_s rtree_ctx_t; +struct rtree_ctx_s { + /* Direct mapped cache. */ + rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; + /* L2 LRU cache. */ + rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; +}; + +void rtree_ctx_data_init(rtree_ctx_t *ctx); + +#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/safety_check.h b/deps/jemalloc/include/jemalloc/internal/safety_check.h new file mode 100644 index 0000000..53339ac --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/safety_check.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H +#define JEMALLOC_INTERNAL_SAFETY_CHECK_H + +void safety_check_fail(const char *format, ...); +/* Can set to NULL for a default. */ +void safety_check_set_abort(void (*abort_fn)()); + +JEMALLOC_ALWAYS_INLINE void +safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) { + assert(usize < bumped_usize); + for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { + *((unsigned char *)ptr + i) = 0xBC; + } +} + +JEMALLOC_ALWAYS_INLINE void +safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize) +{ + for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) { + if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) { + safety_check_fail("Use after free error\n"); + } + } +} + +#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/sc.h b/deps/jemalloc/include/jemalloc/internal/sc.h new file mode 100644 index 0000000..9a099d8 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/sc.h @@ -0,0 +1,333 @@ +#ifndef JEMALLOC_INTERNAL_SC_H +#define JEMALLOC_INTERNAL_SC_H + +#include "jemalloc/internal/jemalloc_internal_types.h" + +/* + * Size class computations: + * + * These are a little tricky; we'll first start by describing how things + * generally work, and then describe some of the details. + * + * Ignore the first few size classes for a moment. We can then split all the + * remaining size classes into groups. 
The size classes in a group are spaced + * such that they cover allocation request sizes in a power-of-2 range. The + * power of two is called the base of the group, and the size classes in it + * satisfy allocations in the half-open range (base, base * 2]. There are + * SC_NGROUP size classes in each group, equally spaced in the range, so that + * each one covers allocations for base / SC_NGROUP possible allocation sizes. + * We call that value (base / SC_NGROUP) the delta of the group. Each size class + * is delta larger than the one before it (including the initial size class in a + * group, which is delta larger than base, the largest size class in the + * previous group). + * To make the math all work out nicely, we require that SC_NGROUP is a power of + * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of + * lg_base and lg_delta. For each of these groups then, we have that + * lg_delta == lg_base - SC_LG_NGROUP. + * The size classes in a group with a given lg_base and lg_delta (which, recall, + * can be computed from lg_base for these groups) are therefore: + * base + 1 * delta + * which covers allocations in (base, base + 1 * delta] + * base + 2 * delta + * which covers allocations in (base + 1 * delta, base + 2 * delta]. + * base + 3 * delta + * which covers allocations in (base + 2 * delta, base + 3 * delta]. + * ... + * base + SC_NGROUP * delta ( == 2 * base) + * which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base]. + * (Note that currently SC_NGROUP is always 4, so the "..." is empty in + * practice.) + * Note that the last size class in the group is the next power of two (after + * base), so that we've set up the induction correctly for the next group's + * selection of delta. + * + * Now, let's start considering the first few size classes. Two extra constants + * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures + * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger + * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we + * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the + * highest required alignment of a platform. For allocation sizes smaller than + * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support + * platforms with types with alignment larger than their size). To allow such + * allocations (without wasting space unnecessarily), we introduce tiny size + * classes; one per power of two, up until we hit the quantum size. There are + * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes. + * + * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the + * start of a group in the sense we described above (covering a power of two + * range) since, if we divided into it to pick a value of delta, we'd get a + * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which + * is against the rules. + * + * The first base we can divide by SC_NGROUP while still being at least + * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by + * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size + * classes are: + * 1 * (1 << LG_QUANTUM) + * 2 * (1 << LG_QUANTUM) + * 3 * (1 << LG_QUANTUM) + * ... (although, as above, this "..." is empty in practice) + * SC_NGROUP * (1 << LG_QUANTUM). 
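+ * (A purely illustrative instance, assuming LG_QUANTUM == 4, i.e. a 16-byte
+ * quantum, and SC_LG_NGROUP == 2: the tiny classes reduce to just 8, the
+ * pseudo-group is 16, 32, 48, 64, and the first regular group is 80, 96,
+ * 112, 128, with lg_base == 6 and lg_delta == 4.)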
+ * + * There are SC_NGROUP of these size classes, so we can regard it as a sort of + * pseudo-group, even though it spans multiple powers of 2, is divided + * differently, and both starts and ends on a power of 2 (as opposed to just + * ending). SC_NGROUP is itself a power of two, so the first group after the + * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a + * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP + * sizes without violating our LG_QUANTUM requirements, so we can safely set + * lg_delta = lg_base - SC_LG_GROUP (== LG_QUANTUM). + * + * So, in order, the size classes are: + * + * Tiny size classes: + * - Count: LG_QUANTUM - SC_LG_TINY_MIN. + * - Sizes: + * 1 << SC_LG_TINY_MIN + * 1 << (SC_LG_TINY_MIN + 1) + * 1 << (SC_LG_TINY_MIN + 2) + * ... + * 1 << (LG_QUANTUM - 1) + * + * Initial pseudo-group: + * - Count: SC_NGROUP + * - Sizes: + * 1 * (1 << LG_QUANTUM) + * 2 * (1 << LG_QUANTUM) + * 3 * (1 << LG_QUANTUM) + * ... + * SC_NGROUP * (1 << LG_QUANTUM) + * + * Regular group 0: + * - Count: SC_NGROUP + * - Sizes: + * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of + * lg_base - SC_LG_NGROUP) + * (1 << lg_base) + 1 * (1 << lg_delta) + * (1 << lg_base) + 2 * (1 << lg_delta) + * (1 << lg_base) + 3 * (1 << lg_delta) + * ... + * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] + * + * Regular group 1: + * - Count: SC_NGROUP + * - Sizes: + * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of + * lg_base - SC_LG_NGROUP) + * (1 << lg_base) + 1 * (1 << lg_delta) + * (1 << lg_base) + 2 * (1 << lg_delta) + * (1 << lg_base) + 3 * (1 << lg_delta) + * ... + * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] + * + * ... + * + * Regular group N: + * - Count: SC_NGROUP + * - Sizes: + * (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of + * lg_base - SC_LG_NGROUP) + * (1 << lg_base) + 1 * (1 << lg_delta) + * (1 << lg_base) + 2 * (1 << lg_delta) + * (1 << lg_base) + 3 * (1 << lg_delta) + * ... + * (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ] + * + * + * Representation of metadata: + * To make the math easy, we'll mostly work in lg quantities. We record lg_base, + * lg_delta, and ndelta (i.e. number of deltas above the base) on a + * per-size-class basis, and maintain the invariant that, across all size + * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta). + * + * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP), + * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP. + * + * For the initial tiny size classes (if any), lg_base is lg(size class size). + * lg_delta is lg_base for the first size class, and lg_base - 1 for all + * subsequent ones. ndelta is always 0. + * + * For the pseudo-group, if there are no tiny size classes, then we set + * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0 + * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta + * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do + * indeed get a power of two that way). If there *are* tiny size classes, then + * the first size class needs to have lg_delta relative to the largest tiny size + * class. We therefore set lg_base == LG_QUANTUM - 1, + * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the + * pseudo-group the same. 
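+ * (Illustrative check of the invariant, again assuming LG_QUANTUM == 4 and
+ * SC_LG_NGROUP == 2: the 96 byte class satisfies 96 == (1 << 6) + 2 * (1 << 4),
+ * so it is recorded as lg_base == 6, lg_delta == 4, ndelta == 2.)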
+ * + * + * Other terminology: + * "Small" size classes mean those that are allocated out of bins, which is the + * same as those that are slab allocated. + * "Large" size classes are those that are not small. The cutoff for counting as + * large is page size * group size. + */ + +/* + * Size class N + (1 << SC_LG_NGROUP) twice the size of size class N. + */ +#define SC_LG_NGROUP 2 +#define SC_LG_TINY_MIN 3 + +#if SC_LG_TINY_MIN == 0 +/* The div module doesn't support division by 1, which this would require. */ +#error "Unsupported LG_TINY_MIN" +#endif + +/* + * The definitions below are all determined by the above settings and system + * characteristics. + */ +#define SC_NGROUP (1ULL << SC_LG_NGROUP) +#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8) +#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN) +#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1) +#define SC_NPSEUDO SC_NGROUP +#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP) +/* + * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base + * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1 + * size class shorter than the others). + * We could probably save some space in arenas by capping this at LG_VADDR size. + */ +#define SC_LG_BASE_MAX (SC_PTR_BITS - 2) +#define SC_NREGULAR (SC_NGROUP * \ + (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1) +#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR) + +/* The number of size classes that are a multiple of the page size. */ +#define SC_NPSIZES ( \ + /* Start with all the size classes. */ \ + SC_NSIZES \ + /* Subtract out those groups with too small a base. */ \ + - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \ + /* And the pseudo-group. */ \ + - SC_NPSEUDO \ + /* And the tiny group. */ \ + - SC_NTINY \ + /* Sizes where ndelta*delta is not a multiple of the page size. */ \ + - (SC_LG_NGROUP * SC_NGROUP)) +/* + * Note that the last line is computed as the sum of the second column in the + * following table: + * lg(base) | count of sizes to exclude + * ------------------------------|----------------------------- + * LG_PAGE - 1 | SC_NGROUP - 1 + * LG_PAGE | SC_NGROUP - 1 + * LG_PAGE + 1 | SC_NGROUP - 2 + * LG_PAGE + 2 | SC_NGROUP - 4 + * ... | ... + * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2) + */ + +/* + * We declare a size class is binnable if size < page size * group. Or, in other + * words, lg(size) < lg(page size) + lg(group size). + */ +#define SC_NBINS ( \ + /* Sub-regular size classes. */ \ + SC_NTINY + SC_NPSEUDO \ + /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \ + + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \ + /* Last SC of the last group hits the bound exactly; exclude it. */ \ + - 1) + +/* + * The size2index_tab lookup table uses uint8_t to encode each bin index, so we + * cannot support more than 256 small size classes. + */ +#if (SC_NBINS > 256) +# error "Too many small size classes" +#endif + +/* The largest size class in the lookup table. */ +#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12) + +/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */ +#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1)) +#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1)) + +/* The largest size class allocated out of a slab. */ +#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ + + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) + +/* The smallest size class not allocated out of a slab. 
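+ * (Illustratively, with 4 KiB pages (LG_PAGE == 12) and SC_LG_NGROUP == 2 this
+ * works out to 1 << 14 == 16384 bytes, one 2048 byte step above
+ * SC_SMALL_MAXCLASS == 8192 + 3 * 2048 == 14336.)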
*/ +#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)) +#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP) + +/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */ +#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2)) +#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP)) + +/* The largest size class supported. */ +#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA) + +typedef struct sc_s sc_t; +struct sc_s { + /* Size class index, or -1 if not a valid size class. */ + int index; + /* Lg group base size (no deltas added). */ + int lg_base; + /* Lg delta to previous size class. */ + int lg_delta; + /* Delta multiplier. size == 1<data) / sizeof(size_t)]; \ + buf[sizeof(buf) / sizeof(size_t) - 1] = 0; \ + memcpy(buf, src, sizeof(type)); \ + size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED); \ + atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED); \ + atomic_fence(ATOMIC_RELEASE); \ + for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ + atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED); \ + } \ + atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE); \ +} \ + \ +/* Returns whether or not the read was consistent. */ \ +static inline bool \ +seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) { \ + size_t buf[sizeof(src->data) / sizeof(size_t)]; \ + size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE); \ + if (seq1 % 2 != 0) { \ + return false; \ + } \ + for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) { \ + buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED); \ + } \ + atomic_fence(ATOMIC_ACQUIRE); \ + size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED); \ + if (seq1 != seq2) { \ + return false; \ + } \ + memcpy(dst, buf, sizeof(type)); \ + return true; \ +} + +#endif /* JEMALLOC_INTERNAL_SEQ_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.h b/deps/jemalloc/include/jemalloc/internal/smoothstep.h new file mode 100644 index 0000000..2e14430 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/smoothstep.h @@ -0,0 +1,232 @@ +#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H +#define JEMALLOC_INTERNAL_SMOOTHSTEP_H + +/* + * This file was generated by the following command: + * sh smoothstep.sh smoother 200 24 3 15 + */ +/******************************************************************************/ + +/* + * This header defines a precomputed table based on the smoothstep family of + * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 + * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so + * that floating point math can be avoided. 
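+ * (Illustrative decoding, not part of the generated data: with
+ * SMOOTHSTEP_BFP == 24 each entry h encodes y ~= h / 2^24, e.g. the
+ * midpoint entry h == 0x800000 corresponds to y == 0.5 at x == 0.5.)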
+ * + * 3 2 + * smoothstep(x) = -2x + 3x + * + * 5 4 3 + * smootherstep(x) = 6x - 15x + 10x + * + * 7 6 5 4 + * smootheststep(x) = -20x + 70x - 84x + 35x + */ + +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP( 47, 
UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ + STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ + STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ + STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ + STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 
0.471891870443750) \ + STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ + STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ + STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 
0.880035843881250) \ + STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ + STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ + STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ + STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ + STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ + STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ + STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ + STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ + STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 
0.999967004818750) \ + STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ + STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ + STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ + +#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.sh b/deps/jemalloc/include/jemalloc/internal/smoothstep.sh new file mode 100755 index 0000000..65de97b --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/smoothstep.sh @@ -0,0 +1,101 @@ +#!/bin/sh +# +# Generate a discrete lookup table for a sigmoid function in the smoothstep +# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table +# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode +# the entries using a binary fixed point representation. +# +# Usage: smoothstep.sh +# +# is in {smooth, smoother, smoothest}. +# must be greater than zero. +# must be in [0..62]; reasonable values are roughly [10..30]. +# is x decimal precision. +# is y decimal precision. + +#set -x + +cmd="sh smoothstep.sh $*" +variant=$1 +nsteps=$2 +bfp=$3 +xprec=$4 +yprec=$5 + +case "${variant}" in + smooth) + ;; + smoother) + ;; + smoothest) + ;; + *) + echo "Unsupported variant" + exit 1 + ;; +esac + +smooth() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +smoother() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +smoothest() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +cat <iteration < 5) { + for (i = 0; i < (1U << spin->iteration); i++) { + spin_cpu_spinwait(); + } + spin->iteration++; + } else { +#ifdef _WIN32 + SwitchToThread(); +#else + sched_yield(); +#endif + } +} + +#undef SPIN_INLINE + +#endif /* JEMALLOC_INTERNAL_SPIN_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/stats.h b/deps/jemalloc/include/jemalloc/internal/stats.h new file mode 100644 index 0000000..3b9e0ea --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/stats.h @@ -0,0 +1,31 @@ +#ifndef JEMALLOC_INTERNAL_STATS_H +#define JEMALLOC_INTERNAL_STATS_H + +/* OPTION(opt, var_name, default, set_value_to) */ +#define STATS_PRINT_OPTIONS \ + OPTION('J', json, false, true) \ + OPTION('g', general, true, false) \ + OPTION('m', merged, config_stats, false) \ + OPTION('d', destroyed, config_stats, false) \ + OPTION('a', unmerged, config_stats, false) \ + OPTION('b', bins, true, false) \ + OPTION('l', large, true, false) \ + OPTION('x', mutex, true, false) \ + OPTION('e', extents, true, false) + +enum { +#define OPTION(o, v, d, s) stats_print_option_num_##v, + STATS_PRINT_OPTIONS +#undef OPTION + stats_print_tot_num_options +}; + +/* Options for stats_print. */ +extern bool opt_stats_print; +extern char opt_stats_print_opts[stats_print_tot_num_options+1]; + +/* Implements je_malloc_stats_print. 
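+ * For example (illustrative usage of the public wrapper, not of this internal
+ * function): malloc_stats_print(NULL, NULL, "J") selects the 'J' (json)
+ * option from the STATS_PRINT_OPTIONS table above.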
*/ +void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/sz.h b/deps/jemalloc/include/jemalloc/internal/sz.h new file mode 100644 index 0000000..68e558a --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/sz.h @@ -0,0 +1,318 @@ +#ifndef JEMALLOC_INTERNAL_SIZE_H +#define JEMALLOC_INTERNAL_SIZE_H + +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/util.h" + +/* + * sz module: Size computations. + * + * Some abbreviations used here: + * p: Page + * ind: Index + * s, sz: Size + * u: Usable size + * a: Aligned + * + * These are not always used completely consistently, but should be enough to + * interpret function names. E.g. sz_psz2ind converts page size to page size + * index; sz_sa2u converts a (size, alignment) allocation request to the usable + * size that would result from such an allocation. + */ + +/* + * sz_pind2sz_tab encodes the same information as could be computed by + * sz_pind2sz_compute(). + */ +extern size_t sz_pind2sz_tab[SC_NPSIZES + 1]; +/* + * sz_index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by sz_index2size_compute(). + */ +extern size_t sz_index2size_tab[SC_NSIZES]; +/* + * sz_size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via sz_size2index(). + */ +extern uint8_t sz_size2index_tab[]; + +static const size_t sz_large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +extern void sz_boot(const sc_data_t *sc_data); + +JEMALLOC_ALWAYS_INLINE pszind_t +sz_psz2ind(size_t psz) { + if (unlikely(psz > SC_LARGE_MAXCLASS)) { + return SC_NPSIZES; + } + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ? + 0 : x - (SC_LG_NGROUP + LG_PAGE); + pszind_t grp = shift << SC_LG_NGROUP; + + pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? + LG_PAGE : x - SC_LG_NGROUP - 1; + + size_t delta_inverse_mask = ZU(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << SC_LG_NGROUP) - 1); + + pszind_t ind = grp + mod; + return ind; +} + +static inline size_t +sz_pind2sz_compute(pszind_t pind) { + if (unlikely(pind == SC_NPSIZES)) { + return SC_LARGE_MAXCLASS + PAGE; + } + size_t grp = pind >> SC_LG_NGROUP; + size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp) + & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return sz; +} + +static inline size_t +sz_pind2sz_lookup(pszind_t pind) { + size_t ret = (size_t)sz_pind2sz_tab[pind]; + assert(ret == sz_pind2sz_compute(pind)); + return ret; +} + +static inline size_t +sz_pind2sz(pszind_t pind) { + assert(pind < SC_NPSIZES + 1); + return sz_pind2sz_lookup(pind); +} + +static inline size_t +sz_psz2u(size_t psz) { + if (unlikely(psz > SC_LARGE_MAXCLASS)) { + return SC_LARGE_MAXCLASS + PAGE; + } + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? 
+ LG_PAGE : x - SC_LG_NGROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return usize; +} + +static inline szind_t +sz_size2index_compute(size_t size) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { + return SC_NSIZES; + } + + if (size == 0) { + return 0; + } +#if (SC_NTINY != 0) + if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { + szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 : + x - (SC_LG_NGROUP + LG_QUANTUM); + szind_t grp = shift << SC_LG_NGROUP; + + szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - SC_LG_NGROUP - 1; + + size_t delta_inverse_mask = ZU(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << SC_LG_NGROUP) - 1); + + szind_t index = SC_NTINY + grp + mod; + return index; + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index_lookup(size_t size) { + assert(size <= SC_LOOKUP_MAXCLASS); + szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1) + >> SC_LG_TINY_MIN]); + assert(ret == sz_size2index_compute(size)); + return ret; +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index(size_t size) { + if (likely(size <= SC_LOOKUP_MAXCLASS)) { + return sz_size2index_lookup(size); + } + return sz_size2index_compute(size); +} + +static inline size_t +sz_index2size_compute(szind_t index) { +#if (SC_NTINY > 0) + if (index < SC_NTINY) { + return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index)); + } +#endif + { + size_t reduced_index = index - SC_NTINY; + size_t grp = reduced_index >> SC_LG_NGROUP; + size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (SC_LG_NGROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size_lookup(szind_t index) { + size_t ret = (size_t)sz_index2size_tab[index]; + assert(ret == sz_index2size_compute(index)); + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size(szind_t index) { + assert(index < SC_NSIZES); + return sz_index2size_lookup(index); +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_compute(size_t size) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { + return 0; + } + + if (size == 0) { + size++; + } +#if (SC_NTINY > 0) + if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) { + size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - SC_LG_NGROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_lookup(size_t size) { + size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); + + assert(ret == sz_s2u_compute(size)); + return ret; +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. 
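+ * For instance (illustrative, assuming a 16-byte quantum): sz_s2u(17) rounds
+ * up to the 32 byte size class, while sz_s2u(32) returns 32 unchanged.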
+ */ +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u(size_t size) { + if (likely(size <= SC_LOOKUP_MAXCLASS)) { + return sz_s2u_lookup(size); + } + return sz_s2u_compute(size); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sz_sa2u(size_t size, size_t alignment) { + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < SC_LARGE_MINCLASS) { + return usize; + } + } + + /* Large size class. Beware of overflow. */ + + if (unlikely(alignment > SC_LARGE_MAXCLASS)) { + return 0; + } + + /* Make sure result is a large size class. */ + if (size <= SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; + } else { + usize = sz_s2u(size); + if (usize < size) { + /* size_t overflow. */ + return 0; + } + } + + /* + * Calculate the multi-page mapping that large_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { + /* size_t overflow. */ + return 0; + } + return usize; +} + +#endif /* JEMALLOC_INTERNAL_SIZE_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tcache_externs.h b/deps/jemalloc/include/jemalloc/internal/tcache_externs.h new file mode 100644 index 0000000..d63eafd --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tcache_externs.h @@ -0,0 +1,53 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H +#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern cache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern unsigned nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. 
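+ * (Illustrative use from application code, not part of this interface: an
+ * index returned by the "tcache.create" mallctl can be passed to mallocx()
+ * via MALLOCX_TCACHE(ind) to allocate from that explicit tcache.)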
+ */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, bool *tcache_success); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *arena); +tcache_t *tcache_create_explicit(tsd_t *tsd); +void tcache_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn); +void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +void tcache_prefork(tsdn_t *tsdn); +void tcache_postfork_parent(tsdn_t *tsdn); +void tcache_postfork_child(tsdn_t *tsdn); +void tcache_flush(tsd_t *tsd); +bool tsd_tcache_data_init(tsd_t *tsd); +bool tsd_tcache_enabled_data_init(tsd_t *tsd); + +#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h b/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h new file mode 100644 index 0000000..5eca20e --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h @@ -0,0 +1,227 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H +#define JEMALLOC_INTERNAL_TCACHE_INLINES_H + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" + +static inline bool +tcache_enabled_get(tsd_t *tsd) { + return tsd_tcache_enabled_get(tsd); +} + +static inline void +tcache_enabled_set(tsd_t *tsd, bool enabled) { + bool was_enabled = tsd_tcache_enabled_get(tsd); + + if (!was_enabled && enabled) { + tsd_tcache_data_init(tsd); + } else if (was_enabled && !enabled) { + tcache_cleanup(tsd); + } + /* Commit the state last. Above calls check current state. */ + tsd_tcache_enabled_set(tsd, enabled); + tsd_slow_update(tsd); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_event(tsd_t *tsd, tcache_t *tcache) { + if (TCACHE_GC_INCR == 0) { + return; + } + + if (unlikely(ticker_tick(&tcache->gc_ticker))) { + tcache_event_hard(tsd, tcache); + } +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, szind_t binind, bool zero, bool slow_path) { + void *ret; + cache_bin_t *bin; + bool tcache_success; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + assert(binind < SC_NBINS); + bin = tcache_small_bin_get(tcache, binind); + ret = cache_bin_alloc_easy(bin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + bool tcache_hard_success; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + bin, binind, &tcache_hard_success); + if (tcache_hard_success == false) { + return NULL; + } + } + + assert(ret); + /* + * Only compute usize if required. The checks in the following if + * statement are all static. 
+ */ + if (config_prof || (slow_path && config_fill) || unlikely(zero)) { + usize = sz_index2size(binind); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], + false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + if (slow_path && config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], true); + } + memset(ret, 0, usize); + } + + if (config_stats) { + bin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) { + void *ret; + cache_bin_t *bin; + bool tcache_success; + + assert(binind >= SC_NBINS &&binind < nhbins); + bin = tcache_large_bin_get(tcache, binind); + ret = cache_bin_alloc_easy(bin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. + */ + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); + if (ret == NULL) { + return NULL; + } + } else { + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + /* Only compute usize on demand */ + if (config_prof || (slow_path && config_fill) || + unlikely(zero)) { + usize = sz_index2size(binind); + assert(usize <= tcache_maxclass); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + memset(ret, 0, usize); + } + + if (config_stats) { + bin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + } + + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + cache_bin_t *bin; + cache_bin_info_t *bin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) + <= SC_SMALL_MAXCLASS); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, &bin_infos[binind]); + } + + bin = tcache_small_bin_get(tcache, binind); + bin_info = &tcache_bin_info[binind]; + if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) { + tcache_bin_flush_small(tsd, tcache, bin, binind, + (bin_info->ncached_max >> 1)); + bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr); + assert(ret); + } + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + cache_bin_t *bin; + cache_bin_info_t *bin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) + > SC_SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + large_dalloc_junk(ptr, sz_index2size(binind)); + } + + bin = tcache_large_bin_get(tcache, binind); + bin_info = &tcache_bin_info[binind]; + if (unlikely(bin->ncached == bin_info->ncached_max)) { + tcache_bin_flush_large(tsd, bin, binind, + (bin_info->ncached_max >> 1), tcache); + } + assert(bin->ncached < bin_info->ncached_max); + bin->ncached++; + 
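    /*
     * The bin's cached pointers occupy the ncached slots immediately below
     * bin->avail (the stack grows downward from avail), so the pointer
     * freed here lands in the lowest used slot and is the next one the
     * allocation fast path will hand back out.
     */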
*(bin->avail - bin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) { + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) { + malloc_printf(": invalid tcache id (%u).\n", ind); + abort(); + } else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) { + elm->tcache = tcache_create_explicit(tsd); + } + return elm->tcache; +} + +#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tcache_structs.h b/deps/jemalloc/include/jemalloc/internal/tcache_structs.h new file mode 100644 index 0000000..172ef90 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tcache_structs.h @@ -0,0 +1,70 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H +#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H + +#include "jemalloc/internal/cache_bin.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/tsd_types.h" + +/* Various uses of this struct need it to be a named type. */ +typedef ql_elm(tsd_t) tsd_link_t; + +struct tcache_s { + /* + * To minimize our cache-footprint, we put the frequently accessed data + * together at the start of this struct. + */ + + /* Cleared after arena_prof_accum(). */ + uint64_t prof_accumbytes; + /* Drives incremental GC. */ + ticker_t gc_ticker; + /* + * The pointer stacks associated with bins follow as a contiguous array. + * During tcache initialization, the avail pointer in each element of + * tbins is initialized to point to the proper offset within this array. + */ + cache_bin_t bins_small[SC_NBINS]; + + /* + * This data is less hot; we can be a little less careful with our + * footprint here. + */ + /* Lets us track all the tcaches in an arena. */ + ql_elm(tcache_t) link; + + /* Logically scoped to tsd, but put here for cache layout reasons. */ + ql_elm(tsd_t) tsd_link; + bool in_hook; + + /* + * The descriptor lets the arena find our cache bins without seeing the + * tcache definition. This enables arenas to aggregate stats across + * tcaches without having a tcache dependency. + */ + cache_bin_array_descriptor_t cache_bin_array_descriptor; + + /* The arena this tcache is associated with. */ + arena_t *arena; + /* Next bin to GC. */ + szind_t next_gc_bin; + /* For small bins, fill (ncached_max >> lg_fill_div). */ + uint8_t lg_fill_div[SC_NBINS]; + /* + * We put the cache bins for large size classes at the end of the + * struct, since some of them might not get used. This might end up + * letting us avoid touching an extra page if we don't have to. + */ + cache_bin_t bins_large[SC_NSIZES-SC_NBINS]; +}; + +/* Linkage for list of available (previously used) explicit tcache IDs. */ +struct tcaches_s { + union { + tcache_t *tcache; + tcaches_t *next; + }; +}; + +#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tcache_types.h b/deps/jemalloc/include/jemalloc/internal/tcache_types.h new file mode 100644 index 0000000..dce6938 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tcache_types.h @@ -0,0 +1,59 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H +#define JEMALLOC_INTERNAL_TCACHE_TYPES_H + +#include "jemalloc/internal/sc.h" + +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. 
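The near-NULL state encoding described above is the trick realized by the TCACHE_STATE_* constants that follow. A tiny, hypothetical illustration of how such sentinels are typically tested (names are not the tcache's):

#include <stdbool.h>
#include <stdint.h>

typedef struct fake_cache_s fake_cache_t;

#define FAKE_STATE_DISABLED  ((fake_cache_t *)(uintptr_t)1)
#define FAKE_STATE_PURGATORY ((fake_cache_t *)(uintptr_t)3)
#define FAKE_STATE_MAX       FAKE_STATE_PURGATORY

/* NULL and the few small sentinel values all mean "no usable cache";
 * anything above the sentinel range is a real pointer. */
static bool cache_available(const fake_cache_t *c) {
    return (uintptr_t)c > (uintptr_t)FAKE_STATE_MAX;
}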
+ */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute minimum number of cache slots for each small bin. + */ +#define TCACHE_NSLOTS_SMALL_MIN 20 + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. This is an additional constraint beyond that imposed as: twice the + * number of regions per slab for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. */ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1)) + +/* Used in TSD static initializer only. Real init in tcache_data_init(). */ +#define TCACHE_ZERO_INITIALIZER {0} + +/* Used in TSD static initializer only. Will be initialized to opt_tcache. */ +#define TCACHE_ENABLED_ZERO_INITIALIZER false + +/* Used for explicit tcache only. Means flushed but not destroyed. */ +#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1) + +#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/test_hooks.h b/deps/jemalloc/include/jemalloc/internal/test_hooks.h new file mode 100644 index 0000000..a6351e5 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/test_hooks.h @@ -0,0 +1,19 @@ +#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H +#define JEMALLOC_INTERNAL_TEST_HOOKS_H + +extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(); +extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(); + +#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) + +#define open JEMALLOC_HOOK(open, test_hooks_libc_hook) +#define read JEMALLOC_HOOK(read, test_hooks_libc_hook) +#define write JEMALLOC_HOOK(write, test_hooks_libc_hook) +#define readlink JEMALLOC_HOOK(readlink, test_hooks_libc_hook) +#define close JEMALLOC_HOOK(close, test_hooks_libc_hook) +#define creat JEMALLOC_HOOK(creat, test_hooks_libc_hook) +#define secure_getenv JEMALLOC_HOOK(secure_getenv, test_hooks_libc_hook) +/* Note that this is undef'd and re-define'd in src/prof.c. */ +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) + +#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ticker.h b/deps/jemalloc/include/jemalloc/internal/ticker.h new file mode 100644 index 0000000..52d0db4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/ticker.h @@ -0,0 +1,91 @@ +#ifndef JEMALLOC_INTERNAL_TICKER_H +#define JEMALLOC_INTERNAL_TICKER_H + +#include "jemalloc/internal/util.h" + +/** + * A ticker makes it easy to count-down events until some limit. You + * ticker_init the ticker to trigger every nticks events. 
You then notify it + * that an event has occurred with calls to ticker_tick (or that nticks events + * have occurred with a call to ticker_ticks), which will return true (and reset + * the counter) if the countdown hit zero. + */ + +typedef struct { + int32_t tick; + int32_t nticks; +} ticker_t; + +static inline void +ticker_init(ticker_t *ticker, int32_t nticks) { + ticker->tick = nticks; + ticker->nticks = nticks; +} + +static inline void +ticker_copy(ticker_t *ticker, const ticker_t *other) { + *ticker = *other; +} + +static inline int32_t +ticker_read(const ticker_t *ticker) { + return ticker->tick; +} + +/* + * Not intended to be a public API. Unfortunately, on x86, neither gcc nor + * clang seems smart enough to turn + * ticker->tick -= nticks; + * if (unlikely(ticker->tick < 0)) { + * fixup ticker + * return true; + * } + * return false; + * into + * subq %nticks_reg, (%ticker_reg) + * js fixup ticker + * + * unless we force "fixup ticker" out of line. In that case, gcc gets it right, + * but clang now does worse than before. So, on x86 with gcc, we force it out + * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be + * worth the hassle, but this is on the fast path of both malloc and free (via + * tcache_event). + */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__x86_64__) || defined(__i386__)) +JEMALLOC_NOINLINE +#endif +static bool +ticker_fixup(ticker_t *ticker) { + ticker->tick = ticker->nticks; + return true; +} + +static inline bool +ticker_ticks(ticker_t *ticker, int32_t nticks) { + ticker->tick -= nticks; + if (unlikely(ticker->tick < 0)) { + return ticker_fixup(ticker); + } + return false; +} + +static inline bool +ticker_tick(ticker_t *ticker) { + return ticker_ticks(ticker, 1); +} + +/* + * Try to tick. If ticker would fire, return true, but rely on + * slowpath to reset ticker. + */ +static inline bool +ticker_trytick(ticker_t *ticker) { + --ticker->tick; + if (unlikely(ticker->tick < 0)) { + return true; + } + return false; +} + +#endif /* JEMALLOC_INTERNAL_TICKER_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h new file mode 100644 index 0000000..9ba2600 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd.h @@ -0,0 +1,415 @@ +#ifndef JEMALLOC_INTERNAL_TSD_H +#define JEMALLOC_INTERNAL_TSD_H + +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/bin_types.h" +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/prof_types.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/witness.h" + +/* + * Thread-Specific-Data layout + * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- + * s: state + * e: tcache_enabled + * m: thread_allocated (config_stats) + * f: thread_deallocated (config_stats) + * p: prof_tdata (config_prof) + * c: rtree_ctx (rtree cache accessed on deallocation) + * t: tcache + * --- data not accessed on tcache fast path: arena-related fields --- + * d: arenas_tdata_bypass + * r: reentrancy_level + * x: narenas_tdata + * i: iarena + * a: arena + * o: arenas_tdata + * Loading TSD data is on the critical path of basically all malloc operations. + * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. 
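Returning briefly to the ticker defined above: a minimal, self-contained usage sketch (the stand-in type mirrors ticker_t only in spirit, and the period of 64 is arbitrary):

#include <stdio.h>

/* Simplified stand-in for ticker_t / ticker_init / ticker_tick. */
typedef struct {
    int tick;
    int nticks;
} toy_ticker_t;

static void toy_ticker_init(toy_ticker_t *t, int nticks) {
    t->tick = nticks;
    t->nticks = nticks;
}

/* Returns 1 (and rewinds the countdown) roughly every nticks calls. */
static int toy_ticker_tick(toy_ticker_t *t) {
    if (--t->tick < 0) {
        t->tick = t->nticks;
        return 1;
    }
    return 0;
}

int main(void) {
    toy_ticker_t gc;
    toy_ticker_init(&gc, 64);
    for (int i = 0; i < 200; i++) {
        if (toy_ticker_tick(&gc)) {
            printf("periodic maintenance after event %d\n", i);
        }
    }
    return 0;
}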
+ * Use a compact layout to reduce cache footprint. + * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+ + * |---------------------------- 1st cacheline ----------------------------| + * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | + * |---------------------------- 2nd cacheline ----------------------------| + * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | + * |---------------------------- 3nd cacheline ----------------------------| + * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | + * +-------------------------------------------------------------------------+ + * Note: the entire tcache is embedded into TSD and spans multiple cachelines. + * + * The last 3 members (i, a and o) before tcache isn't really needed on tcache + * fast path. However we have a number of unused tcache bins and witnesses + * (never touched unless config_debug) at the end of tcache, so we place them + * there to avoid breaking the cachelines and possibly paging in an extra page. + */ +#ifdef JEMALLOC_JET +typedef void (*test_callback_t)(int *); +# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 +# define MALLOC_TEST_TSD \ + O(test_data, int, int) \ + O(test_callback, test_callback_t, int) +# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL +#else +# define MALLOC_TEST_TSD +# define MALLOC_TEST_TSD_INITIALIZER +#endif + +/* O(name, type, nullable type */ +#define MALLOC_TSD \ + O(tcache_enabled, bool, bool) \ + O(arenas_tdata_bypass, bool, bool) \ + O(reentrancy_level, int8_t, int8_t) \ + O(narenas_tdata, uint32_t, uint32_t) \ + O(offset_state, uint64_t, uint64_t) \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + O(bytes_until_sample, int64_t, int64_t) \ + O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ + O(iarena, arena_t *, arena_t *) \ + O(arena, arena_t *, arena_t *) \ + O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ + O(binshards, tsd_binshards_t, tsd_binshards_t)\ + O(tcache, tcache_t, tcache_t) \ + O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ + MALLOC_TEST_TSD + +#define TSD_INITIALIZER { \ + ATOMIC_INIT(tsd_state_uninitialized), \ + TCACHE_ENABLED_ZERO_INITIALIZER, \ + false, \ + 0, \ + 0, \ + 0, \ + 0, \ + 0, \ + 0, \ + NULL, \ + RTREE_CTX_ZERO_INITIALIZER, \ + NULL, \ + NULL, \ + NULL, \ + TSD_BINSHARDS_ZERO_INITIALIZER, \ + TCACHE_ZERO_INITIALIZER, \ + WITNESS_TSD_INITIALIZER \ + MALLOC_TEST_TSD_INITIALIZER \ +} + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +void tsd_cleanup(void *arg); +tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); +void tsd_state_set(tsd_t *tsd, uint8_t new_state); +void tsd_slow_update(tsd_t *tsd); +void tsd_prefork(tsd_t *tsd); +void tsd_postfork_parent(tsd_t *tsd); +void tsd_postfork_child(tsd_t *tsd); + +/* + * Call ..._inc when your module wants to take all threads down the slow paths, + * and ..._dec when it no longer needs to. + */ +void tsd_global_slow_inc(tsdn_t *tsdn); +void tsd_global_slow_dec(tsdn_t *tsdn); +bool tsd_global_slow(); + +enum { + /* Common case --> jnz. */ + tsd_state_nominal = 0, + /* Initialized but on slow path. 
*/ + tsd_state_nominal_slow = 1, + /* + * Some thread has changed global state in such a way that all nominal + * threads need to recompute their fast / slow status the next time they + * get a chance. + * + * Any thread can change another thread's status *to* recompute, but + * threads are the only ones who can change their status *from* + * recompute. + */ + tsd_state_nominal_recompute = 2, + /* + * The above nominal states should be lower values. We use + * tsd_nominal_max to separate nominal states from threads in the + * process of being born / dying. + */ + tsd_state_nominal_max = 2, + + /* + * A thread might free() during its death as its only allocator action; + * in such scenarios, we need tsd, but set up in such a way that no + * cleanup is necessary. + */ + tsd_state_minimal_initialized = 3, + /* States during which we know we're in thread death. */ + tsd_state_purgatory = 4, + tsd_state_reincarnated = 5, + /* + * What it says on the tin; tsd that hasn't been initialized. Note + * that even when the tsd struct lives in TLS, when need to keep track + * of stuff like whether or not our pthread destructors have been + * scheduled, so this really truly is different than the nominal state. + */ + tsd_state_uninitialized = 6 +}; + +/* + * Some TSD accesses can only be done in a nominal state. To enforce this, we + * wrap TSD member access in a function that asserts on TSD state, and mangle + * field names to prevent touching them accidentally. + */ +#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n + +#ifdef JEMALLOC_U8_ATOMICS +# define tsd_state_t atomic_u8_t +# define tsd_atomic_load atomic_load_u8 +# define tsd_atomic_store atomic_store_u8 +# define tsd_atomic_exchange atomic_exchange_u8 +#else +# define tsd_state_t atomic_u32_t +# define tsd_atomic_load atomic_load_u32 +# define tsd_atomic_store atomic_store_u32 +# define tsd_atomic_exchange atomic_exchange_u32 +#endif + +/* The actual tsd. */ +struct tsd_s { + /* + * The contents should be treated as totally opaque outside the tsd + * module. Access any thread-local state through the getters and + * setters below. + */ + + /* + * We manually limit the state to just a single byte. Unless the 8-bit + * atomics are unavailable (which is rare). + */ + tsd_state_t state; +#define O(n, t, nt) \ + t TSD_MANGLE(n); +MALLOC_TSD +#undef O +}; + +JEMALLOC_ALWAYS_INLINE uint8_t +tsd_state_get(tsd_t *tsd) { + /* + * This should be atomic. Unfortunately, compilers right now can't tell + * that this can be done as a memory comparison, and forces a load into + * a register that hurts fast-path performance. + */ + /* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */ + return *(uint8_t *)&tsd->state; +} + +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. + */ +struct tsdn_s { + tsd_t tsd; +}; +#define TSDN_NULL ((tsdn_t *)0) +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) { + return (tsdn_t *)tsd; +} + +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) { + return tsdn == NULL; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) { + assert(!tsdn_null(tsdn)); + + return &tsdn->tsd; +} + +/* + * We put the platform-specific data declarations and inlines into their own + * header files to avoid cluttering this file. They define tsd_boot0, + * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
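The struct above and the per-field accessors generated below both expand from the single O()/MALLOC_TSD X-macro list, so the field set is defined exactly once. A self-contained illustration of the idiom with hypothetical fields:

#include <stdio.h>

/* One list drives both the struct layout and the generated getters. */
#define FIELDS \
    O(counter, int,          0)      \
    O(label,   const char *, "none")

typedef struct {
#define O(name, type, init) type name;
    FIELDS
#undef O
} settings_t;

#define O(name, type, init) \
    static type settings_##name##_get(const settings_t *s) { return s->name; }
FIELDS
#undef O

int main(void) {
    settings_t s = {
#define O(name, type, init) init,
        FIELDS
#undef O
    };
    printf("%d %s\n", settings_counter_get(&s), settings_label_get(&s));
    return 0;
}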
+ */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#include "jemalloc/internal/tsd_malloc_thread_cleanup.h" +#elif (defined(JEMALLOC_TLS)) +#include "jemalloc/internal/tsd_tls.h" +#elif (defined(_WIN32)) +#include "jemalloc/internal/tsd_win.h" +#else +#include "jemalloc/internal/tsd_generic.h" +#endif + +/* + * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of + * foo. This omits some safety checks, and so can be used during tsd + * initialization and cleanup. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get_unsafe(tsd_t *tsd) { \ + return &tsd->TSD_MANGLE(n); \ +} +MALLOC_TSD +#undef O + +/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get(tsd_t *tsd) { \ + /* \ + * Because the state might change asynchronously if it's \ + * nominal, we need to make sure that we only read it once. \ + */ \ + uint8_t state = tsd_state_get(tsd); \ + assert(state == tsd_state_nominal || \ + state == tsd_state_nominal_slow || \ + state == tsd_state_nominal_recompute || \ + state == tsd_state_reincarnated || \ + state == tsd_state_minimal_initialized); \ + return tsd_##n##p_get_unsafe(tsd); \ +} +MALLOC_TSD +#undef O + +/* + * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn + * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE nt * \ +tsdn_##n##p_get(tsdn_t *tsdn) { \ + if (tsdn_null(tsdn)) { \ + return NULL; \ + } \ + tsd_t *tsd = tsdn_tsd(tsdn); \ + return (nt *)tsd_##n##p_get(tsd); \ +} +MALLOC_TSD +#undef O + +/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t \ +tsd_##n##_get(tsd_t *tsd) { \ + return *tsd_##n##p_get(tsd); \ +} +MALLOC_TSD +#undef O + +/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE void \ +tsd_##n##_set(tsd_t *tsd, t val) { \ + assert(tsd_state_get(tsd) != tsd_state_reincarnated && \ + tsd_state_get(tsd) != tsd_state_minimal_initialized); \ + *tsd_##n##p_get(tsd) = val; \ +} +MALLOC_TSD +#undef O + +JEMALLOC_ALWAYS_INLINE void +tsd_assert_fast(tsd_t *tsd) { + /* + * Note that our fastness assertion does *not* include global slowness + * counters; it's not in general possible to ensure that they won't + * change asynchronously from underneath us. + */ + assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && + tsd_reentrancy_level_get(tsd) == 0); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_fast(tsd_t *tsd) { + bool fast = (tsd_state_get(tsd) == tsd_state_nominal); + if (fast) { + tsd_assert_fast(tsd); + } + + return fast; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init, bool minimal) { + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) { + return NULL; + } + assert(tsd != NULL); + + if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) { + return tsd_fetch_slow(tsd, minimal); + } + assert(tsd_fast(tsd)); + tsd_assert_fast(tsd); + + return tsd; +} + +/* Get a minimal TSD that requires no cleanup. See comments in free(). */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_min(void) { + return tsd_fetch_impl(true, true); +} + +/* For internal background threads use only. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_internal_fetch(void) { + tsd_t *tsd = tsd_fetch_min(); + /* Use reincarnated state to prevent full initialization. 
*/ + tsd_state_set(tsd, tsd_state_reincarnated); + + return tsd; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) { + return tsd_fetch_impl(true, false); +} + +static inline bool +tsd_nominal(tsd_t *tsd) { + return (tsd_state_get(tsd) <= tsd_state_nominal_max); +} + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) { + if (!tsd_booted_get()) { + return NULL; + } + + return tsd_tsdn(tsd_fetch_impl(false, false)); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsd_rtree_ctx(tsd_t *tsd) { + return tsd_rtree_ctxp_get(tsd); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { + /* + * If tsd cannot be accessed, initialize the fallback rtree_ctx and + * return a pointer to it. + */ + if (unlikely(tsdn_null(tsdn))) { + rtree_ctx_data_init(fallback); + return fallback; + } + return tsd_rtree_ctx(tsdn_tsd(tsdn)); +} + +#endif /* JEMALLOC_INTERNAL_TSD_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tsd_generic.h b/deps/jemalloc/include/jemalloc/internal/tsd_generic.h new file mode 100644 index 0000000..cf73c0c --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd_generic.h @@ -0,0 +1,163 @@ +#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_GENERIC_H + +typedef struct tsd_init_block_s tsd_init_block_t; +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; + +/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ +typedef struct tsd_init_head_s tsd_init_head_t; + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); + +extern pthread_key_t tsd_tsd; +extern tsd_init_head_t tsd_init_head; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE void +tsd_cleanup_wrapper(void *arg) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. 
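             * (POSIX runs a key's destructor again on the next destructor
             * pass whenever the slot has been re-set to a non-NULL value,
             * for up to PTHREAD_DESTRUCTOR_ITERATIONS passes; the
             * pthread_setspecific() call below relies on that behavior.)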
*/ + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) + { + malloc_write(": Error setting TSD\n"); + if (opt_abort) { + abort(); + } + } + return; + } + } + malloc_tsd_dalloc(wrapper); +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { + malloc_write(": Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); + + if (init && unlikely(wrapper == NULL)) { + tsd_init_block_t block; + wrapper = (tsd_wrapper_t *) + tsd_init_check_recursion(&tsd_init_head, &block); + if (wrapper) { + return wrapper; + } + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + block.data = (void *)wrapper; + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + tsd_t initializer = TSD_INITIALIZER; + JEMALLOC_DIAGNOSTIC_POP + wrapper->val = initializer; + } + tsd_wrapper_set(wrapper); + tsd_init_finish(&tsd_init_head, &block); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { + return true; + } + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + tsd_t initializer = TSD_INITIALIZER; + JEMALLOC_DIAGNOSTIC_POP + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + } + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h new file mode 100644 index 0000000..65852d5 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -0,0 +1,61 @@ +#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H + +#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL + +extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; +extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized; +extern bool tsd_booted; + +/* Initialization/cleanup. 
*/ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + if (tsd_initialized) { + tsd_initialized = false; + tsd_cleanup(&tsd_tls); + } + return tsd_initialized; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + return &tsd_tls; +} +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + tsd_initialized = true; +} diff --git a/deps/jemalloc/include/jemalloc/internal/tsd_tls.h b/deps/jemalloc/include/jemalloc/internal/tsd_tls.h new file mode 100644 index 0000000..7d6c805 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd_tls.h @@ -0,0 +1,60 @@ +#ifdef JEMALLOC_INTERNAL_TSD_TLS_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_TLS_H + +#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL + +extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls; +extern pthread_key_t tsd_tsd; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { + return true; + } + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + return &tsd_tls; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { + malloc_write(": Error setting tsd.\n"); + if (opt_abort) { + abort(); + } + } +} diff --git a/deps/jemalloc/include/jemalloc/internal/tsd_types.h b/deps/jemalloc/include/jemalloc/internal/tsd_types.h new file mode 100644 index 0000000..6200af6 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd_types.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H +#define JEMALLOC_INTERNAL_TSD_TYPES_H + +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; +typedef bool (*malloc_tsd_cleanup_t)(void); + +#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tsd_win.h b/deps/jemalloc/include/jemalloc/internal/tsd_win.h new file mode 100644 index 0000000..cf30d18 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/tsd_win.h @@ -0,0 +1,139 @@ +#ifdef JEMALLOC_INTERNAL_TSD_WIN_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_WIN_H + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +extern DWORD tsd_tsd; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. 
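tsd_tls.h above keeps the data itself in a __thread variable for fast access and uses a pthreads key purely to obtain a destructor callback at thread exit. A hedged, standalone sketch of that pairing (all names hypothetical, compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static __thread int my_state;      /* fast, direct TLS access */
static pthread_key_t cleanup_key;  /* exists only for its destructor */

static void my_state_cleanup(void *arg) {
    printf("thread exiting, state was %d\n", *(int *)arg);
}

static void my_state_set(int v) {
    my_state = v;
    /* Any non-NULL value arms the destructor for this thread. */
    pthread_setspecific(cleanup_key, &my_state);
}

static void *worker(void *unused) {
    (void)unused;
    my_state_set(42);
    return NULL;                   /* my_state_cleanup() runs after this */
}

int main(void) {
    pthread_t t;
    pthread_key_create(&cleanup_key, my_state_cleanup);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    return 0;
}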
*/ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); + SetLastError(error); + + if (wrapper == NULL) { + return false; + } + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. */ + return true; + } + } + malloc_tsd_dalloc(wrapper); + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { + malloc_write(": Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); + SetLastError(error); + + if (init && unlikely(wrapper == NULL)) { + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + /* MSVC is finicky about aggregate initialization. */ + tsd_t tsd_initializer = TSD_INITIALIZER; + wrapper->val = tsd_initializer; + } + tsd_wrapper_set(wrapper); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + tsd_tsd = TlsAlloc(); + if (tsd_tsd == TLS_OUT_OF_INDEXES) { + return true; + } + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + } + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h new file mode 100644 index 0000000..304cb54 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/util.h @@ -0,0 +1,67 @@ +#ifndef JEMALLOC_INTERNAL_UTIL_H +#define JEMALLOC_INTERNAL_UTIL_H + +#define UTIL_INLINE static inline + +/* Junk fill patterns. */ +#ifndef JEMALLOC_ALLOC_JUNK +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#endif +#ifndef JEMALLOC_FREE_JUNK +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#endif + +/* + * Wrap a cpp argument that contains commas such that it isn't broken up into + * multiple arguments. + */ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ + +/* cpp macro definition stringification. 
*/ +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + +/* + * Silence compiler warnings due to uninitialized values. This is used + * wherever the compiler fails to recognize that the variable is never used + * uninitialized. + */ +#define JEMALLOC_CC_SILENCE_INIT(v) = v + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) +#endif + +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +#endif + +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() + +/* Set error code. */ +UTIL_INLINE void +set_errno(int errnum) { +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code. */ +UTIL_INLINE int +get_errno(void) { +#ifdef _WIN32 + return GetLastError(); +#else + return errno; +#endif +} + +#undef UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/witness.h b/deps/jemalloc/include/jemalloc/internal/witness.h new file mode 100644 index 0000000..fff9e98 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/internal/witness.h @@ -0,0 +1,347 @@ +#ifndef JEMALLOC_INTERNAL_WITNESS_H +#define JEMALLOC_INTERNAL_WITNESS_H + +#include "jemalloc/internal/ql.h" + +/******************************************************************************/ +/* LOCK RANKS */ +/******************************************************************************/ + +/* + * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness + * machinery. + */ + +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_MIN 1U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_TCACHES 2U +#define WITNESS_RANK_ARENAS 3U + +#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U + +#define WITNESS_RANK_PROF_DUMP 5U +#define WITNESS_RANK_PROF_BT2GCTX 6U +#define WITNESS_RANK_PROF_TDATAS 7U +#define WITNESS_RANK_PROF_TDATA 8U +#define WITNESS_RANK_PROF_LOG 9U +#define WITNESS_RANK_PROF_GCTX 10U +#define WITNESS_RANK_BACKGROUND_THREAD 11U + +/* + * Used as an argument to witness_assert_depth_to_rank() in order to validate + * depth excluding non-core locks with lower ranks. Since the rank argument to + * witness_assert_depth_to_rank() is inclusive rather than exclusive, this + * definition can have the same value as the minimally ranked core lock. 
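Back in util.h above, the likely()/unlikely() wrappers map to __builtin_expect on GCC-compatible compilers and degrade to plain truth tests elsewhere; they are used throughout these headers to keep error handling off the hot path. A small illustrative use (the function itself is hypothetical):

#include <stddef.h>

#ifdef __GNUC__
# define likely(x)   __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x)   !!(x)
# define unlikely(x) !!(x)
#endif

static int process(const unsigned char *buf, size_t len) {
    if (unlikely(buf == NULL || len == 0)) {
        return -1;                 /* cold error path */
    }
    /* Hot path continues here. */
    return (int)buf[0];
}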
+ */ +#define WITNESS_RANK_CORE 12U + +#define WITNESS_RANK_DECAY 12U +#define WITNESS_RANK_TCACHE_QL 13U +#define WITNESS_RANK_EXTENT_GROW 14U +#define WITNESS_RANK_EXTENTS 15U +#define WITNESS_RANK_EXTENT_AVAIL 16U + +#define WITNESS_RANK_EXTENT_POOL 17U +#define WITNESS_RANK_RTREE 18U +#define WITNESS_RANK_BASE 19U +#define WITNESS_RANK_ARENA_LARGE 20U +#define WITNESS_RANK_HOOK 21U + +#define WITNESS_RANK_LEAF 0xffffffffU +#define WITNESS_RANK_BIN WITNESS_RANK_LEAF +#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF +#define WITNESS_RANK_DSS WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF + +/******************************************************************************/ +/* PER-WITNESS DATA */ +/******************************************************************************/ +#if defined(JEMALLOC_DEBUG) +# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} +#else +# define WITNESS_INITIALIZER(name, rank) +#endif + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, void *, const witness_t *, + void *); + +struct witness_s { + /* Name, used for printing lock order reversal messages. */ + const char *name; + + /* + * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses + * must be acquired in order of increasing rank. + */ + witness_rank_t rank; + + /* + * If two witnesses are of equal rank and they have the samp comp + * function pointer, it is called as a last attempt to differentiate + * between witnesses of equal rank. + */ + witness_comp_t *comp; + + /* Opaque data, passed to comp(). */ + void *opaque; + + /* Linkage for thread's currently owned locks. 
*/ + ql_elm(witness_t) link; +}; + +/******************************************************************************/ +/* PER-THREAD DATA */ +/******************************************************************************/ +typedef struct witness_tsd_s witness_tsd_t; +struct witness_tsd_s { + witness_list_t witnesses; + bool forking; +}; + +#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } +#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) + +/******************************************************************************/ +/* (PER-THREAD) NULLABILITY HELPERS */ +/******************************************************************************/ +typedef struct witness_tsdn_s witness_tsdn_t; +struct witness_tsdn_s { + witness_tsd_t witness_tsd; +}; + +JEMALLOC_ALWAYS_INLINE witness_tsdn_t * +witness_tsd_tsdn(witness_tsd_t *witness_tsd) { + return (witness_tsdn_t *)witness_tsd; +} + +JEMALLOC_ALWAYS_INLINE bool +witness_tsdn_null(witness_tsdn_t *witness_tsdn) { + return witness_tsdn == NULL; +} + +JEMALLOC_ALWAYS_INLINE witness_tsd_t * +witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { + assert(!witness_tsdn_null(witness_tsdn)); + return &witness_tsdn->witness_tsd; +} + +/******************************************************************************/ +/* API */ +/******************************************************************************/ +void witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque); + +typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); +extern witness_lock_error_t *JET_MUTABLE witness_lock_error; + +typedef void (witness_owner_error_t)(const witness_t *); +extern witness_owner_error_t *JET_MUTABLE witness_owner_error; + +typedef void (witness_not_owner_error_t)(const witness_t *); +extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; + +typedef void (witness_depth_error_t)(const witness_list_t *, + witness_rank_t rank_inclusive, unsigned depth); +extern witness_depth_error_t *JET_MUTABLE witness_depth_error; + +void witnesses_cleanup(witness_tsd_t *witness_tsd); +void witness_prefork(witness_tsd_t *witness_tsd); +void witness_postfork_parent(witness_tsd_t *witness_tsd); +void witness_postfork_child(witness_tsd_t *witness_tsd); + +/* Helper, not intended for direct use. 
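The acquisition rule from the witness_s comment above (locks must be taken in order of increasing rank, with a tie-break comparator for equal ranks) is what the inline assertions below implement. A stripped-down sketch of just the core check, debug-only like the real machinery, with hypothetical names and the equal-rank tie-break omitted:

#include <assert.h>
#include <stddef.h>

typedef struct {
    const char *name;
    unsigned rank;
} toy_witness_t;

/* Acquiring a lock whose rank is not strictly greater than the most
 * recently acquired one indicates a potential lock-order reversal. */
static void toy_witness_lock(const toy_witness_t *last_held,
    const toy_witness_t *acquiring) {
    if (last_held != NULL) {
        assert(acquiring->rank > last_held->rank && "lock order reversal");
    }
}

int main(void) {
    toy_witness_t arenas = {"arenas", 3};   /* cf. WITNESS_RANK_ARENAS */
    toy_witness_t rtree = {"rtree", 18};    /* cf. WITNESS_RANK_RTREE */

    toy_witness_lock(NULL, &arenas);        /* first lock: always fine */
    toy_witness_lock(&arenas, &rtree);      /* 3 -> 18: increasing, fine */
    /* toy_witness_lock(&rtree, &arenas) would trip the assertion. */
    return 0;
}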
*/ +static inline bool +witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { + witness_list_t *witnesses; + witness_t *w; + + cassert(config_debug); + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + return true; + } + } + + return false; +} + +static inline void +witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { + witness_tsd_t *witness_tsd; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + if (witness_owner(witness_tsd, witness)) { + return; + } + witness_owner_error(witness); +} + +static inline void +witness_assert_not_owner(witness_tsdn_t *witness_tsdn, + const witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + witness_not_owner_error(witness); + } + } +} + +static inline void +witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, + witness_rank_t rank_inclusive, unsigned depth) { + witness_tsd_t *witness_tsd; + unsigned d; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + + d = 0; + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w != NULL) { + ql_reverse_foreach(w, witnesses, link) { + if (w->rank < rank_inclusive) { + break; + } + d++; + } + } + if (d != depth) { + witness_depth_error(witnesses, rank_inclusive, depth); + } +} + +static inline void +witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { + witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); +} + +static inline void +witness_assert_lockless(witness_tsdn_t *witness_tsdn) { + witness_assert_depth(witness_tsdn, 0); +} + +static inline void +witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witness_assert_not_owner(witness_tsdn, witness); + + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (witness_tsd->forking && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > + 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. 
+ */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +static inline void +witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. + */ + if (witness_owner(witness_tsd, witness)) { + witnesses = &witness_tsd->witnesses; + ql_remove(witnesses, witness, link); + } else { + witness_assert_owner(witness_tsdn, witness); + } +} + +#endif /* JEMALLOC_INTERNAL_WITNESS_H */ diff --git a/deps/jemalloc/include/jemalloc/jemalloc.sh b/deps/jemalloc/include/jemalloc/jemalloc.sh new file mode 100755 index 0000000..b19b154 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/jemalloc.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +objroot=$1 + +cat < +#include +#include +#include +#include + +#define JEMALLOC_VERSION "@jemalloc_version@" +#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ +#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ +#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ +#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ +#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" +#define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@ + +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena..{purge,decay,dss}" and + * "stats.arenas..*" mallctl interfaces to select all arenas. This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. + * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas..*" mallctl interfaces to select + * destroyed arenas. 
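The MALLCTL_ARENAS_ALL comment above already spells out the intended call; here it is as a compilable helper, again assuming the unprefixed public API:

#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

/* Ask every arena to purge its dirty pages. */
static int purge_all_arenas(void) {
    return mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge",
        NULL, NULL, NULL, 0);
}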
+ */ +#define MALLCTL_ARENAS_DESTROYED 4097 + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if defined(_MSC_VER) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_ARG(i) +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG +# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3)) +# else +# define JEMALLOC_FORMAT_ARG(i) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif + +/* This version of Jemalloc, modified for Redis, has the je_get_defrag_hint() + * function. */ +#define JEMALLOC_FRAG_HINT diff --git a/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh b/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh new file mode 100755 index 0000000..c675bb4 --- /dev/null +++ b/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh @@ -0,0 +1,45 @@ +#!/bin/sh -eu + +public_symbols_txt=$1 +symbol_prefix=$2 + +cat < + +/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ +/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ +/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as + * a built-in type. 
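For completeness, the MALLOCX_* encodings defined earlier are combined by OR-ing them into the mallocx() flags argument; MALLOCX_ALIGN() stores lg(alignment) in the low flag bits, leaving the tcache and arena bit fields free. A short usage sketch, assuming the unprefixed public API:

#include <jemalloc/jemalloc.h>

/* 4 KiB allocation, 64-byte aligned and zeroed. */
static void *aligned_zeroed_4k(void) {
    return mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
}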
*/ +#ifndef __clang__ +typedef BOOL _Bool; +#endif + +#define bool _Bool +#define true 1 +#define false 0 + +#define __bool_true_false_are_defined 1 + +#endif /* stdbool_h */ diff --git a/deps/jemalloc/include/msvc_compat/C99/stdint.h b/deps/jemalloc/include/msvc_compat/C99/stdint.h new file mode 100644 index 0000000..d02608a --- /dev/null +++ b/deps/jemalloc/include/msvc_compat/C99/stdint.h @@ -0,0 +1,247 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. 
+#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer 
types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +#define INTMAX_C INT64_C +#define UINTMAX_C UINT64_C + +#endif // __STDC_CONSTANT_MACROS ] + + +#endif // _MSC_STDINT_H_ ] diff --git a/deps/jemalloc/include/msvc_compat/strings.h b/deps/jemalloc/include/msvc_compat/strings.h new file mode 100644 index 0000000..996f256 --- /dev/null +++ b/deps/jemalloc/include/msvc_compat/strings.h @@ -0,0 +1,58 @@ +#ifndef strings_h +#define strings_h + +/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided + * for both */ +#ifdef _MSC_VER +# include +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) { + unsigned long i; + + if (_BitScanForward(&i, x)) { + return i + 1; + } + return 0; +} + +static __forceinline int ffs(int x) { + return ffsl(x); +} + +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif + +static __forceinline int ffsll(unsigned __int64 x) { + unsigned long i; +#ifdef _M_X64 + if (_BitScanForward64(&i, x)) { + return i + 1; + } + return 0; +#else +// Fallback for 32-bit build where 64-bit version not available +// assuming little endian + union { + unsigned __int64 ll; + unsigned long l[2]; + } s; + + s.ll = x; + + if (_BitScanForward(&i, s.l[0])) { + return i + 1; + } else if(_BitScanForward(&i, s.l[1])) { + return i + 33; + } + return 0; +#endif +} + +#else +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) +#endif + +#endif /* strings_h */ diff --git a/deps/jemalloc/include/msvc_compat/windows_extra.h b/deps/jemalloc/include/msvc_compat/windows_extra.h new file mode 100644 index 0000000..a6ebb93 --- /dev/null +++ b/deps/jemalloc/include/msvc_compat/windows_extra.h @@ -0,0 +1,6 @@ +#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H +#define MSVC_COMPAT_WINDOWS_EXTRA_H + +#include + +#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/deps/jemalloc/jemalloc.pc.in b/deps/jemalloc/jemalloc.pc.in new file mode 100644 index 0000000..c428a86 --- /dev/null +++ b/deps/jemalloc/jemalloc.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ +install_suffix=@install_suffix@ + +Name: jemalloc +Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. 
+URL: http://jemalloc.net/ +Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@ +Cflags: -I${includedir} +Libs: -L${libdir} -ljemalloc${install_suffix} diff --git a/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 b/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 new file mode 100644 index 0000000..2c18e49 --- /dev/null +++ b/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 @@ -0,0 +1,562 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) +# +# DESCRIPTION +# +# Check for baseline language coverage in the compiler for the specified +# version of the C++ standard. If necessary, add switches to CXX and +# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) +# or '14' (for the C++14 standard). +# +# The second argument, if specified, indicates whether you insist on an +# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. +# -std=c++11). If neither is specified, you get whatever works, with +# preference for an extended mode. +# +# The third argument, if specified 'mandatory' or if left unspecified, +# indicates that baseline support for the specified C++ standard is +# required and that the macro should error out if no mode with that +# support is found. If specified 'optional', then configuration proceeds +# regardless, after defining HAVE_CXX${VERSION} if and only if a +# supporting mode is found. +# +# LICENSE +# +# Copyright (c) 2008 Benjamin Kosnik +# Copyright (c) 2012 Zack Weinberg +# Copyright (c) 2013 Roy Stogner +# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov +# Copyright (c) 2015 Paul Norman +# Copyright (c) 2015 Moritz Klammler +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 4 + +dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro +dnl (serial version number 13). 
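+dnl (Editorial usage sketch, not part of the upstream macro file:) per the
+dnl SYNOPSIS above, a configure.ac that requires C++14 in strict conformance
+dnl mode would invoke the macro as, for example:
+dnl
+dnl   AX_CXX_COMPILE_STDCXX([14], [noext], [mandatory])
+dnl
+dnl while a project that merely prefers C++11 could instead use
+dnl
+dnl   AX_CXX_COMPILE_STDCXX([11], [ext], [optional])
+dnl
+dnl The optional form defines HAVE_CXX11 only if a supporting mode is found,
+dnl as described above, and configuration proceeds either way.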
+ +AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl + m4_if([$1], [11], [], + [$1], [14], [], + [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])], + [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$2], [], [], + [$2], [ext], [], + [$2], [noext], [], + [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], + [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], + [$3], [optional], [ax_cxx_compile_cxx$1_required=false], + [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) + AC_LANG_PUSH([C++])dnl + ac_success=no + AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, + ax_cv_cxx_compile_cxx$1, + [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [ax_cv_cxx_compile_cxx$1=yes], + [ax_cv_cxx_compile_cxx$1=no])]) + if test x$ax_cv_cxx_compile_cxx$1 = xyes; then + ac_success=yes + fi + + m4_if([$2], [noext], [], [dnl + if test x$ac_success = xno; then + for switch in -std=gnu++$1 -std=gnu++0x; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi]) + + m4_if([$2], [ext], [], [dnl + if test x$ac_success = xno; then + dnl HP's aCC needs +std=c++11 according to: + dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf + dnl Cray's crayCC needs "-h std=c++11" + for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi]) + AC_LANG_POP([C++]) + if test x$ax_cxx_compile_cxx$1_required = xtrue; then + if test x$ac_success = xno; then + AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) + fi + fi + if test x$ac_success = xno; then + HAVE_CXX$1=0 + AC_MSG_NOTICE([No compiler with C++$1 support was found]) + else + HAVE_CXX$1=1 + AC_DEFINE(HAVE_CXX$1,1, + [define if the compiler supports basic C++$1 syntax]) + fi + AC_SUBST(HAVE_CXX$1) +]) + + +dnl Test body for checking C++11 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 +) + + +dnl Test body for checking C++14 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 +) + + +dnl Tests for new features in C++11 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ + +// If the compiler admits that it is not ready for C++11, why torture it? +// Hopefully, this will speed up the test. 
+ +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201103L + +#error "This is not a C++11 compiler" + +#else + +namespace cxx11 +{ + + namespace test_static_assert + { + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + } + + namespace test_final_override + { + + struct Base + { + virtual void f() {} + }; + + struct Derived : public Base + { + virtual void f() override {} + }; + + } + + namespace test_double_right_angle_brackets + { + + template < typename T > + struct check {}; + + typedef check single_type; + typedef check> double_type; + typedef check>> triple_type; + typedef check>>> quadruple_type; + + } + + namespace test_decltype + { + + int + f() + { + int a = 1; + decltype(a) b = 2; + return a + b; + } + + } + + namespace test_type_deduction + { + + template < typename T1, typename T2 > + struct is_same + { + static const bool value = false; + }; + + template < typename T > + struct is_same + { + static const bool value = true; + }; + + template < typename T1, typename T2 > + auto + add(T1 a1, T2 a2) -> decltype(a1 + a2) + { + return a1 + a2; + } + + int + test(const int c, volatile int v) + { + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == false, ""); + auto ac = c; + auto av = v; + auto sumi = ac + av + 'x'; + auto sumf = ac + av + 1.0; + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == true, ""); + return (sumf > 0.0) ? sumi : add(c, v); + } + + } + + namespace test_noexcept + { + + int f() { return 0; } + int g() noexcept { return 0; } + + static_assert(noexcept(f()) == false, ""); + static_assert(noexcept(g()) == true, ""); + + } + + namespace test_constexpr + { + + template < typename CharT > + unsigned long constexpr + strlen_c_r(const CharT *const s, const unsigned long acc) noexcept + { + return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; + } + + template < typename CharT > + unsigned long constexpr + strlen_c(const CharT *const s) noexcept + { + return strlen_c_r(s, 0UL); + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("1") == 1UL, ""); + static_assert(strlen_c("example") == 7UL, ""); + static_assert(strlen_c("another\0example") == 7UL, ""); + + } + + namespace test_rvalue_references + { + + template < int N > + struct answer + { + static constexpr int value = N; + }; + + answer<1> f(int&) { return answer<1>(); } + answer<2> f(const int&) { return answer<2>(); } + answer<3> f(int&&) { return answer<3>(); } + + void + test() + { + int i = 0; + const int c = 0; + static_assert(decltype(f(i))::value == 1, ""); + static_assert(decltype(f(c))::value == 2, ""); + static_assert(decltype(f(0))::value == 3, ""); + } + + } + + namespace test_uniform_initialization + { + + struct test + { + static const int zero {}; + static const int one {1}; + }; + + static_assert(test::zero == 0, ""); + static_assert(test::one == 1, ""); + + } + + namespace test_lambdas + { + + void + test1() + { + auto lambda1 = [](){}; + auto lambda2 = lambda1; + lambda1(); + lambda2(); + } + + int + test2() + { + auto a = [](int i, int j){ return i + j; }(1, 2); + auto b = []() -> int { return '0'; }(); + auto c = [=](){ return a + b; }(); + auto d = [&](){ return c; }(); + auto e = [a, &b](int x) mutable { + const auto identity = [](int y){ return y; }; + for (auto i = 0; i < a; ++i) + a += b--; + return x + identity(a + b); + }(0); + return a + b + c + d + e; + } + + int + test3() + { + const auto nullary = [](){ return 0; }; + const auto unary = [](int x){ return x; }; + using nullary_t = decltype(nullary); + using unary_t = decltype(unary); + const auto higher1st = [](nullary_t f){ return f(); }; + const auto higher2nd = [unary](nullary_t f1){ + return [unary, f1](unary_t f2){ return f2(unary(f1())); }; + }; + return higher1st(nullary) + higher2nd(nullary)(unary); + } + + } + + namespace test_variadic_templates + { + + template + struct sum; + + template + struct sum + { + static constexpr auto value = N0 + sum::value; + }; + + template <> + struct sum<> + { + static constexpr auto value = 0; + }; + + static_assert(sum<>::value == 0, ""); + static_assert(sum<1>::value == 1, ""); + static_assert(sum<23>::value == 23, ""); + static_assert(sum<1, 2>::value == 3, ""); + static_assert(sum<5, 5, 11>::value == 21, ""); + static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); + + } + + // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae + // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function + // because of this. + namespace test_template_alias_sfinae + { + + struct foo {}; + + template + using member = typename T::member_type; + + template + void func(...) {} + + template + void func(member*) {} + + void test(); + + void test() { func(0); } + + } + +} // namespace cxx11 + +#endif // __cplusplus >= 201103L + +]]) + + +dnl Tests for new features in C++14 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ + +// If the compiler admits that it is not ready for C++14, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201402L + +#error "This is not a C++14 compiler" + +#else + +namespace cxx14 +{ + + namespace test_polymorphic_lambdas + { + + int + test() + { + const auto lambda = [](auto&&... args){ + const auto istiny = [](auto x){ + return (sizeof(x) == 1UL) ? 
1 : 0; + }; + const int aretiny[] = { istiny(args)... }; + return aretiny[0]; + }; + return lambda(1, 1L, 1.0f, '1'); + } + + } + + namespace test_binary_literals + { + + constexpr auto ivii = 0b0000000000101010; + static_assert(ivii == 42, "wrong value"); + + } + + namespace test_generalized_constexpr + { + + template < typename CharT > + constexpr unsigned long + strlen_c(const CharT *const s) noexcept + { + auto length = 0UL; + for (auto p = s; *p; ++p) + ++length; + return length; + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("x") == 1UL, ""); + static_assert(strlen_c("test") == 4UL, ""); + static_assert(strlen_c("another\0test") == 7UL, ""); + + } + + namespace test_lambda_init_capture + { + + int + test() + { + auto x = 0; + const auto lambda1 = [a = x](int b){ return a + b; }; + const auto lambda2 = [a = lambda1(x)](){ return a; }; + return lambda2(); + } + + } + + namespace test_digit_seperators + { + + constexpr auto ten_million = 100'000'000; + static_assert(ten_million == 100000000, ""); + + } + + namespace test_return_type_deduction + { + + auto f(int& x) { return x; } + decltype(auto) g(int& x) { return x; } + + template < typename T1, typename T2 > + struct is_same + { + static constexpr auto value = false; + }; + + template < typename T > + struct is_same + { + static constexpr auto value = true; + }; + + int + test() + { + auto x = 0; + static_assert(is_same::value, ""); + static_assert(is_same::value, ""); + return x; + } + + } + +} // namespace cxx14 + +#endif // __cplusplus >= 201402L + +]]) diff --git a/deps/jemalloc/msvc/ReadMe.txt b/deps/jemalloc/msvc/ReadMe.txt new file mode 100644 index 0000000..633a7d4 --- /dev/null +++ b/deps/jemalloc/msvc/ReadMe.txt @@ -0,0 +1,23 @@ + +How to build jemalloc for Windows +================================= + +1. Install Cygwin with at least the following packages: + * autoconf + * autogen + * gawk + * grep + * sed + +2. Install Visual Studio 2015 or 2017 with Visual C++ + +3. Add Cygwin\bin to the PATH environment variable + +4. Open "x64 Native Tools Command Prompt for VS 2017" + (note: x86/x64 doesn't matter at this point) + +5. Generate header files: + sh -c "CC=cl ./autogen.sh" + +6. 
Now the project can be opened and built in Visual Studio: + msvc\jemalloc_vc2017.sln diff --git a/deps/jemalloc/msvc/jemalloc_vc2015.sln b/deps/jemalloc/msvc/jemalloc_vc2015.sln new file mode 100644 index 0000000..aedd5e5 --- /dev/null +++ b/deps/jemalloc/msvc/jemalloc_vc2015.sln @@ -0,0 +1,63 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.24720.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" + ProjectSection(SolutionItems) = preProject + ReadMe.txt = ReadMe.txt + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Debug-static|x64 = Debug-static|x64 + Debug-static|x86 = Debug-static|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + Release-static|x64 = Release-static|x64 + Release-static|x86 = Release-static|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 + 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/deps/jemalloc/msvc/jemalloc_vc2017.sln b/deps/jemalloc/msvc/jemalloc_vc2017.sln new file mode 100644 index 0000000..c22fcb4 --- /dev/null +++ b/deps/jemalloc/msvc/jemalloc_vc2017.sln @@ -0,0 +1,63 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.24720.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" + ProjectSection(SolutionItems) = preProject + ReadMe.txt = ReadMe.txt + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Debug-static|x64 = Debug-static|x64 + Debug-static|x86 = Debug-static|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + Release-static|x64 = Release-static|x64 + Release-static|x86 = Release-static|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 + 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj new file mode 100644 index 0000000..228e8be --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -0,0 +1,350 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} + Win32Proj + jemalloc + 8.1 + + + + DynamicLibrary + true + v140 + MultiByte + + + StaticLibrary + true + v140 + MultiByte + + + DynamicLibrary + false + v140 + true + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + DynamicLibrary + true + v140 + MultiByte + + + StaticLibrary + true + v140 + MultiByte + + + DynamicLibrary + false + v140 + true + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled 
+ JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + OldStyle + false + + + Windows + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + OldStyle + + + Windows + true + true + true + + + + + + diff --git a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters new file mode 100644 index 0000000..d839515 --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -0,0 +1,107 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj new file mode 100644 index 0000000..325876d --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj @@ -0,0 +1,327 @@ + + + + + 
Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {09028CFD-4EB7-491D-869C-0708DB97ED44} + Win32Proj + test_threads + 8.1 + + + + Application + true + v140 + MultiByte + + + Application + true + v140 + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + true + v140 + MultiByte + + + Application + true + v140 + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + _DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + 
$(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + {8d6bb292-9e1c-413d-9f98-4864bdc1514a} + + + + + + + + + \ No newline at end of file diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters new file mode 100644 index 0000000..fa4588f --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + + + Source Files + + + Source Files + + + + + Header Files + + + \ No newline at end of file diff --git a/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj new file mode 100644 index 0000000..edcceed --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -0,0 +1,350 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} + Win32Proj + jemalloc + + + + DynamicLibrary + true + v141 + MultiByte + + + StaticLibrary + true + v141 + MultiByte + + + DynamicLibrary + false + v141 + true + MultiByte + + + StaticLibrary + false + v141 + true + MultiByte + + + DynamicLibrary + true + v141 + MultiByte + + + StaticLibrary + true + v141 + MultiByte + + + DynamicLibrary + false + v141 + true + MultiByte + + + StaticLibrary + false + v141 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + + + + Level3 + Disabled + _REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + OldStyle + false + + + Windows + true + + + + + Level3 + + + MaxSpeed + true + true + _REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + OldStyle + + + Windows + true + true + true + + + + + + diff --git a/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters new file mode 100644 index 
0000000..6df7260 --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -0,0 +1,110 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + diff --git a/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj b/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj new file mode 100644 index 0000000..c35b0f5 --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj @@ -0,0 +1,326 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {09028CFD-4EB7-491D-869C-0708DB97ED44} + Win32Proj + test_threads + + + + Application + true + v141 + MultiByte + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + true + v141 + MultiByte + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + false + v141 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + 
_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + {8d6bb292-9e1c-413d-9f98-4864bdc1514a} + + + + + + + + + \ No newline at end of file diff --git a/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters new file mode 100644 index 0000000..fa4588f --- /dev/null +++ b/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + 
h;hh;hpp;hxx;hm;inl;inc;xsd + + + + + Source Files + + + Source Files + + + + + Header Files + + + \ No newline at end of file diff --git a/deps/jemalloc/msvc/test_threads/test_threads.cpp b/deps/jemalloc/msvc/test_threads/test_threads.cpp new file mode 100644 index 0000000..92e3162 --- /dev/null +++ b/deps/jemalloc/msvc/test_threads/test_threads.cpp @@ -0,0 +1,88 @@ +// jemalloc C++ threaded test +// Author: Rustam Abdullaev +// Public Domain + +#include +#include +#include +#include +#include +#include +#include +#include + +using std::vector; +using std::thread; +using std::uniform_int_distribution; +using std::minstd_rand; + +int test_threads() { + je_malloc_conf = "narenas:3"; + int narenas = 0; + size_t sz = sizeof(narenas); + je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); + if (narenas != 3) { + printf("Error: unexpected number of arenas: %d\n", narenas); + return 1; + } + static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 }; + static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); + vector workers; + static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50; + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated1; + size_t sz1 = sizeof(allocated1); + je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); + printf("\nPress Enter to start threads...\n"); + getchar(); + printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); + for (int i = 0; i < numThreads; i++) { + workers.emplace_back([tid=i]() { + uniform_int_distribution sizeDist(0, numSizes - 1); + minstd_rand rnd(tid * 17); + uint8_t* ptrs[numAllocsMax]; + int ptrsz[numAllocsMax]; + for (int i = 0; i < numIter1; ++i) { + thread t([&]() { + for (int i = 0; i < numIter2; ++i) { + const int numAllocs = numAllocsMax - sizeDist(rnd); + for (int j = 0; j < numAllocs; j += 64) { + const int x = sizeDist(rnd); + const int sz = sizes[x]; + ptrsz[j] = sz; + ptrs[j] = (uint8_t*)je_malloc(sz); + if (!ptrs[j]) { + printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x); + exit(1); + } + for (int k = 0; k < sz; k++) + ptrs[j][k] = tid + k; + } + for (int j = 0; j < numAllocs; j += 64) { + for (int k = 0, sz = ptrsz[j]; k < sz; k++) + if (ptrs[j][k] != (uint8_t)(tid + k)) { + printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k)); + exit(1); + } + je_free(ptrs[j]); + } + } + }); + t.join(); + } + }); + } + for (thread& t : workers) { + t.join(); + } + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated2; + je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); + size_t leaked = allocated2 - allocated1; + printf("\nDone. Leaked: %zd bytes\n", leaked); + bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) + printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); + printf("\nPress Enter to continue...\n"); + getchar(); + return failed ? 
1 : 0; +} diff --git a/deps/jemalloc/msvc/test_threads/test_threads.h b/deps/jemalloc/msvc/test_threads/test_threads.h new file mode 100644 index 0000000..64d0cdb --- /dev/null +++ b/deps/jemalloc/msvc/test_threads/test_threads.h @@ -0,0 +1,3 @@ +#pragma once + +int test_threads(); diff --git a/deps/jemalloc/msvc/test_threads/test_threads_main.cpp b/deps/jemalloc/msvc/test_threads/test_threads_main.cpp new file mode 100644 index 0000000..0a022fb --- /dev/null +++ b/deps/jemalloc/msvc/test_threads/test_threads_main.cpp @@ -0,0 +1,11 @@ +#include "test_threads.h" +#include +#include +#include + +using namespace std::chrono_literals; + +int main(int argc, char** argv) { + int rc = test_threads(); + return rc; +} diff --git a/deps/jemalloc/run_tests.sh b/deps/jemalloc/run_tests.sh new file mode 100755 index 0000000..b434f15 --- /dev/null +++ b/deps/jemalloc/run_tests.sh @@ -0,0 +1 @@ +$(dirname "$)")/scripts/gen_run_tests.py | bash diff --git a/deps/jemalloc/scripts/gen_run_tests.py b/deps/jemalloc/scripts/gen_run_tests.py new file mode 100755 index 0000000..a414f81 --- /dev/null +++ b/deps/jemalloc/scripts/gen_run_tests.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +import sys +from itertools import combinations +from os import uname +from multiprocessing import cpu_count +from subprocess import call + +# Later, we want to test extended vaddr support. Apparently, the "real" way of +# checking this is flaky on OS X. +bits_64 = sys.maxsize > 2**32 + +nparallel = cpu_count() * 2 + +uname = uname()[0] + +if "BSD" in uname: + make_cmd = 'gmake' +else: + make_cmd = 'make' + +def powerset(items): + result = [] + for i in xrange(len(items) + 1): + result += combinations(items, i) + return result + +possible_compilers = [] +for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']): + try: + cmd_ret = call([cc, "-v"]) + if cmd_ret == 0: + possible_compilers.append((cc, cxx)) + except: + pass +possible_compiler_opts = [ + '-m32', +] +possible_config_opts = [ + '--enable-debug', + '--enable-prof', + '--disable-stats', + '--enable-opt-safety-checks', +] +if bits_64: + possible_config_opts.append('--with-lg-vaddr=56') + +possible_malloc_conf_opts = [ + 'tcache:false', + 'dss:primary', + 'percpu_arena:percpu', + 'background_thread:true', +] + +print 'set -e' +print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd} +print 'autoconf' +print 'rm -rf run_tests.out' +print 'mkdir run_tests.out' +print 'cd run_tests.out' + +ind = 0 +for cc, cxx in possible_compilers: + for compiler_opts in powerset(possible_compiler_opts): + for config_opts in powerset(possible_config_opts): + for malloc_conf_opts in powerset(possible_malloc_conf_opts): + if cc is 'clang' \ + and '-m32' in possible_compiler_opts \ + and '--enable-prof' in config_opts: + continue + config_line = ( + 'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror ' + + 'CC="{} {}" '.format(cc, " ".join(compiler_opts)) + + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts)) + + '../../configure ' + + " ".join(config_opts) + (' --with-malloc-conf=' + + ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0 + else '') + ) + + # We don't want to test large vaddr spaces in 32-bit mode. + if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in + config_opts): + continue + + # Per CPU arenas are only supported on Linux. + linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \ + or 'background_thread:true' in malloc_conf_opts) + # Heap profiling and dss are not supported on OS X. 
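+ # (Editorial illustration, not upstream:) for example, on Darwin a generated
+ # configuration containing '--enable-prof' or 'dss:primary' is skipped by the
+ # check below, while on Linux every generated combination is emitted as a
+ # run_test_<ind>.sh script.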
+ darwin_unsupported = ('--enable-prof' in config_opts or \ + 'dss:primary' in malloc_conf_opts) + if (uname == 'Linux' and linux_supported) \ + or (not linux_supported and (uname != 'Darwin' or \ + not darwin_unsupported)): + print """cat < run_test_%(ind)d.sh +#!/bin/sh + +set -e + +abort() { + echo "==> Error" >> run_test.log + echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log" + exit 255 # Special exit code tells xargs to terminate. +} + +# Environment variables are not supported. +run_cmd() { + echo "==> \$@" >> run_test.log + \$@ >> run_test.log 2>&1 || abort +} + +echo "=> run_test_%(ind)d: %(config_line)s" +mkdir run_test_%(ind)d.out +cd run_test_%(ind)d.out + +echo "==> %(config_line)s" >> run_test.log +%(config_line)s >> run_test.log 2>&1 || abort + +run_cmd %(make_cmd)s all tests +run_cmd %(make_cmd)s check +run_cmd %(make_cmd)s distclean +EOF +chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd} + ind += 1 + +print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel} diff --git a/deps/jemalloc/scripts/gen_travis.py b/deps/jemalloc/scripts/gen_travis.py new file mode 100755 index 0000000..f1478c6 --- /dev/null +++ b/deps/jemalloc/scripts/gen_travis.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python + +from itertools import combinations + +travis_template = """\ +language: generic +dist: precise + +matrix: + include: +%s + +before_script: + - autoconf + - scripts/gen_travis.py > travis_script && diff .travis.yml travis_script + - ./configure ${COMPILER_FLAGS:+ \ + CC="$CC $COMPILER_FLAGS" \ + CXX="$CXX $COMPILER_FLAGS" } \ + $CONFIGURE_FLAGS + - make -j3 + - make -j3 tests + +script: + - make check +""" + +# The 'default' configuration is gcc, on linux, with no compiler or configure +# flags. We also test with clang, -m32, --enable-debug, --enable-prof, +# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing +# travis though, we don't test all 2**7 = 128 possible combinations of these; +# instead, we only test combinations of up to 2 'unusual' settings, under the +# hope that bugs involving interactions of such settings are rare. 
+# Things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29 +MAX_UNUSUAL_OPTIONS = 2 + +os_default = 'linux' +os_unusual = 'osx' + +compilers_default = 'CC=gcc CXX=g++' +compilers_unusual = 'CC=clang CXX=clang++' + +compiler_flag_unusuals = ['-m32'] + +configure_flag_unusuals = [ + '--enable-debug', + '--enable-prof', + '--disable-stats', + '--disable-libdl', + '--enable-opt-safety-checks', +] + +malloc_conf_unusuals = [ + 'tcache:false', + 'dss:primary', + 'percpu_arena:percpu', + 'background_thread:true', +] + +all_unusuals = ( + [os_unusual] + [compilers_unusual] + compiler_flag_unusuals + + configure_flag_unusuals + malloc_conf_unusuals +) + +unusual_combinations_to_test = [] +for i in xrange(MAX_UNUSUAL_OPTIONS + 1): + unusual_combinations_to_test += combinations(all_unusuals, i) + +gcc_multilib_set = False +# Formats a job from a combination of flags +def format_job(combination): + global gcc_multilib_set + + os = os_unusual if os_unusual in combination else os_default + compilers = compilers_unusual if compilers_unusual in combination else compilers_default + + compiler_flags = [x for x in combination if x in compiler_flag_unusuals] + configure_flags = [x for x in combination if x in configure_flag_unusuals] + malloc_conf = [x for x in combination if x in malloc_conf_unusuals] + + # Filter out unsupported configurations on OS X. + if os == 'osx' and ('dss:primary' in malloc_conf or \ + 'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \ + in malloc_conf): + return "" + if len(malloc_conf) > 0: + configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf)) + + # Filter out an unsupported configuration - heap profiling on OS X. + if os == 'osx' and '--enable-prof' in configure_flags: + return "" + + # We get some spurious errors when -Warray-bounds is enabled. 
+ env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" ' + 'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format( + compilers, " ".join(compiler_flags), " ".join(configure_flags)) + + job = "" + job += ' - os: %s\n' % os + job += ' env: %s\n' % env_string + if '-m32' in combination and os == 'linux': + job += ' addons:' + if gcc_multilib_set: + job += ' *gcc_multilib\n' + else: + job += ' &gcc_multilib\n' + job += ' apt:\n' + job += ' packages:\n' + job += ' - gcc-multilib\n' + gcc_multilib_set = True + return job + +include_rows = "" +for combination in unusual_combinations_to_test: + include_rows += format_job(combination) + +# Development build +include_rows += '''\ + # Development build + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" +''' + +# Enable-expermental-smallocx +include_rows += '''\ + # --enable-expermental-smallocx: + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" +''' + +# Valgrind build bots +include_rows += ''' + # Valgrind + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind" + addons: + apt: + packages: + - valgrind +''' + +# To enable valgrind on macosx add: +# +# - os: osx +# env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind" +# install: brew install valgrind +# +# It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274 + +print travis_template % include_rows diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c new file mode 100644 index 0000000..ba50e41 --- /dev/null +++ b/deps/jemalloc/src/arena.c @@ -0,0 +1,2298 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/div.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/util.h" + +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + +/******************************************************************************/ +/* Data. */ + +/* + * Define names for both unininitialized and initialized phases, so that + * options and mallctl processing are straightforward. 
+ */ +const char *percpu_arena_mode_names[] = { + "percpu", + "phycpu", + "disabled", + "percpu", + "phycpu" +}; +percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; + +ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; +ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; + +static atomic_zd_t dirty_decay_ms_default; +static atomic_zd_t muzzy_decay_ms_default; + +const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +static div_info_t arena_binind_div_info[SC_NBINS]; + +size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; +size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT; +static unsigned huge_arena_ind; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + size_t npages_decay_max, bool is_background_thread); +static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread, bool all); +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); + +/******************************************************************************/ + +void +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy) { + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena_dss_prec_get(arena)]; + *dirty_decay_ms = arena_dirty_decay_ms_get(arena); + *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); + *ndirty += extents_npages_get(&arena->extents_dirty); + *nmuzzy += extents_npages_get(&arena->extents_muzzy); +} + +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + bin_stats_t *bstats, arena_stats_large_t *lstats, + arena_stats_extents_t *estats) { + cassert(config_stats); + + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, + muzzy_decay_ms, nactive, ndirty, nmuzzy); + + size_t base_allocated, base_resident, base_mapped, metadata_thp; + base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, + &base_mapped, &metadata_thp); + + arena_stats_lock(tsdn, &arena->stats); + + arena_stats_accum_zu(&astats->mapped, base_mapped + + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); + arena_stats_accum_zu(&astats->retained, + extents_npages_get(&arena->extents_retained) << LG_PAGE); + + atomic_store_zu(&astats->extent_avail, + atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED), + ATOMIC_RELAXED); + + arena_stats_accum_u64(&astats->decay_dirty.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.npurge)); + arena_stats_accum_u64(&astats->decay_dirty.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.nmadvise)); + arena_stats_accum_u64(&astats->decay_dirty.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.purged)); + + arena_stats_accum_u64(&astats->decay_muzzy.npurge, + arena_stats_read_u64(tsdn, 
&arena->stats, + &arena->stats.decay_muzzy.npurge)); + arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.nmadvise)); + arena_stats_accum_u64(&astats->decay_muzzy.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.purged)); + + arena_stats_accum_zu(&astats->base, base_allocated); + arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); + arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); + arena_stats_accum_zu(&astats->resident, base_resident + + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + + extents_npages_get(&arena->extents_dirty) + + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu( + &arena->stats.abandoned_vm, ATOMIC_RELAXED)); + + for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) { + uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nmalloc); + arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); + arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + + uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].ndalloc); + arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); + arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + + uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nrequests); + arena_stats_accum_u64(&lstats[i].nrequests, + nmalloc + nrequests); + arena_stats_accum_u64(&astats->nrequests_large, + nmalloc + nrequests); + + /* nfill == nmalloc for large currently. */ + arena_stats_accum_u64(&lstats[i].nfills, nmalloc); + arena_stats_accum_u64(&astats->nfills_large, nmalloc); + + uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nflushes); + arena_stats_accum_u64(&lstats[i].nflushes, nflush); + arena_stats_accum_u64(&astats->nflushes_large, nflush); + + assert(nmalloc >= ndalloc); + assert(nmalloc - ndalloc <= SIZE_T_MAX); + size_t curlextents = (size_t)(nmalloc - ndalloc); + lstats[i].curlextents += curlextents; + arena_stats_accum_zu(&astats->allocated_large, + curlextents * sz_index2size(SC_NBINS + i)); + } + + for (pszind_t i = 0; i < SC_NPSIZES; i++) { + size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes, + retained_bytes; + dirty = extents_nextents_get(&arena->extents_dirty, i); + muzzy = extents_nextents_get(&arena->extents_muzzy, i); + retained = extents_nextents_get(&arena->extents_retained, i); + dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i); + muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i); + retained_bytes = + extents_nbytes_get(&arena->extents_retained, i); + + atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED); + atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED); + atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED); + atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes, + ATOMIC_RELAXED); + atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes, + ATOMIC_RELAXED); + atomic_store_zu(&estats[i].retained_bytes, retained_bytes, + ATOMIC_RELAXED); + } + + arena_stats_unlock(tsdn, &arena->stats); + + /* tcache_bytes counts currently cached bytes. 
*/ + atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + cache_bin_array_descriptor_t *descriptor; + ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { + szind_t i = 0; + for (; i < SC_NBINS; i++) { + cache_bin_t *tbin = &descriptor->bins_small[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + for (; i < nhbins; i++) { + cache_bin_t *tbin = &descriptor->bins_large[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + } + malloc_mutex_prof_read(tsdn, + &astats->mutex_prof_data[arena_prof_mutex_tcache_list], + &arena->tcache_ql_mtx); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ + &arena->mtx); \ + malloc_mutex_unlock(tsdn, &arena->mtx); + + /* Gather per arena mutex profiling data. */ + READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); + READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, + arena_prof_mutex_extent_avail) + READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, + arena_prof_mutex_extents_dirty) + READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, + arena_prof_mutex_extents_muzzy) + READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, + arena_prof_mutex_extents_retained) + READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, + arena_prof_mutex_decay_dirty) + READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, + arena_prof_mutex_decay_muzzy) + READ_ARENA_MUTEX_PROF_DATA(base->mtx, + arena_prof_mutex_base) +#undef READ_ARENA_MUTEX_PROF_DATA + + nstime_copy(&astats->uptime, &arena->create_time); + nstime_update(&astats->uptime); + nstime_subtract(&astats->uptime, &arena->create_time); + + for (szind_t i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + bin_stats_merge(tsdn, &bstats[i], + &arena->bins[i].bin_shards[j]); + } + } +} + +void +arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, + extent); + if (arena_dirty_decay_ms_get(arena) == 0) { + arena_decay_dirty(tsdn, arena, false, true); + } else { + arena_background_thread_inactivity_check(tsdn, arena, false); + } +} + +static void * +arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { + void *ret; + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + size_t regind; + + assert(extent_nfree_get(slab) > 0); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); + + regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + extent_nfree_dec(slab); + return ret; +} + +static void +arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info, + unsigned cnt, void** ptrs) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + + assert(extent_nfree_get(slab) >= cnt); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); + +#if (! 
defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE) + for (unsigned i = 0; i < cnt; i++) { + size_t regind = bitmap_sfu(slab_data->bitmap, + &bin_info->bitmap_info); + *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + } +#else + unsigned group = 0; + bitmap_t g = slab_data->bitmap[group]; + unsigned i = 0; + while (i < cnt) { + while (g == 0) { + g = slab_data->bitmap[++group]; + } + size_t shift = group << LG_BITMAP_GROUP_NBITS; + size_t pop = popcount_lu(g); + if (pop > (cnt - i)) { + pop = cnt - i; + } + + /* + * Load from memory locations only once, outside the + * hot loop below. + */ + uintptr_t base = (uintptr_t)extent_addr_get(slab); + uintptr_t regsize = (uintptr_t)bin_info->reg_size; + while (pop--) { + size_t bit = cfs_lu(&g); + size_t regind = shift + bit; + *(ptrs + i) = (void *)(base + regsize * regind); + + i++; + } + slab_data->bitmap[group] = g; + } +#endif + extent_nfree_sub(slab, cnt); +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { + size_t diff, regind; + + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % + (uintptr_t)bin_infos[binind].reg_size == 0); + + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); + + /* Avoid doing division with a variable divisor. */ + regind = div_compute(&arena_binind_div_info[binind], diff); + + assert(regind < bin_infos[binind].nregs); + + return regind; +} + +static void +arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + size_t regind = arena_slab_regind(slab, binind, ptr); + + assert(extent_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. */ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + extent_nfree_inc(slab); +} + +static void +arena_nactive_add(arena_t *arena, size_t add_pages) { + atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); +} + +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) { + assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); +} + +static void +arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; + } + index = sz_size2index(usize); + hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].nmalloc, 1); +} + +static void +arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; + } + index = sz_size2index(usize); + hindex = (index >= SC_NBINS) ? 
index - SC_NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].ndalloc, 1); +} + +static void +arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, + size_t usize) { + arena_large_dalloc_stats_update(tsdn, arena, oldusize); + arena_large_malloc_stats_update(tsdn, arena, usize); +} + +static bool +arena_may_have_muzzy(arena_t *arena) { + return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0)); +} + +extent_t * +arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + szind_t szind = sz_size2index(usize); + size_t mapped_add; + bool commit = true; + extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, + szind, zero, &commit); + if (extent == NULL && arena_may_have_muzzy(arena)) { + extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, + false, szind, zero, &commit); + } + size_t size = usize + sz_large_pad; + if (extent == NULL) { + extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, + usize, sz_large_pad, alignment, false, szind, zero, + &commit); + if (config_stats) { + /* + * extent may be NULL on OOM, but in that case + * mapped_add isn't used below, so there's no need to + * conditionlly set it to 0 here. + */ + mapped_add = size; + } + } else if (config_stats) { + mapped_add = 0; + } + + if (extent != NULL) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_malloc_stats_update(tsdn, arena, usize); + if (mapped_add != 0) { + arena_stats_add_zu(tsdn, &arena->stats, + &arena->stats.mapped, mapped_add); + } + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, size >> LG_PAGE); + } + + return extent; +} + +void +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_dalloc_stats_update(tsdn, arena, + extent_usize_get(extent)); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); +} + +void +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = oldusize - usize; + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, udiff >> LG_PAGE); +} + +void +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = usize - oldusize; + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, udiff >> LG_PAGE); +} + +static ssize_t +arena_decay_ms_read(arena_decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); +} + +static void +arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); +} + +static void +arena_decay_deadline_init(arena_decay_t *decay) { + /* + * Generate a new deadline that is 
uniformly random within the next + * epoch after the current one. + */ + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (arena_decay_ms_read(decay) > 0) { + nstime_t jitter; + + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); + } +} + +static bool +arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); +} + +static size_t +arena_decay_backlog_npages_limit(const arena_decay_t *decay) { + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; + + /* + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); + + return npages_limit_backlog; +} + +static void +arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { + size_t npages_delta = (current_npages > decay->nunpurged) ? + current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; + + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; + } + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; + } + } +} + +static void +arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(decay, current_npages); +} + +static void +arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { + if (current_npages > npages_limit) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + npages_limit, current_npages - npages_limit, + is_background_thread); + } +} + +static void +arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, + size_t current_npages) { + assert(arena_decay_deadline_reached(decay, time)); + + nstime_t delta; + nstime_copy(&delta, time); + nstime_subtract(&delta, &decay->epoch); + + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); + + /* Set a new deadline. */ + arena_decay_deadline_init(decay); + + /* Update the backlog. 
*/ + arena_decay_backlog_update(decay, nadvance_u64, current_npages); +} + +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, const nstime_t *time, bool is_background_thread) { + size_t current_npages = extents_npages_get(extents); + arena_decay_epoch_advance_helper(decay, time, current_npages); + + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + /* We may unlock decay->mtx when try_purge(). Finish logging first. */ + decay->nunpurged = (npages_limit > current_npages) ? npages_limit : + current_npages; + + if (!background_thread_enabled() || is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + current_npages, npages_limit, is_background_thread); + } +} + +static void +arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { + arena_decay_ms_write(decay, decay_ms); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); + } + + nstime_init(&decay->epoch, 0); + nstime_update(&decay->epoch); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + arena_decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} + +static bool +arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, + arena_stats_decay_t *stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_decay_t); i++) { + assert(((char *)decay)[i] == 0); + } + decay->ceil_npages = 0; + } + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; + } + decay->purging = false; + arena_decay_reinit(decay, decay_ms); + /* Memory is zeroed, so there is no need to clear stats. */ + if (config_stats) { + decay->stats = stats; + } + return false; +} + +static bool +arena_decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; + } + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; + } + return false; +} + +static bool +arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = arena_decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + 0, extents_npages_get(extents), + is_background_thread); + } + return false; + } + + nstime_t time; + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) + > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, &time); + arena_decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, &time) <= 0); + } + + /* + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. 
Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). + */ + bool advance_epoch = arena_decay_deadline_reached(decay, &time); + if (advance_epoch) { + arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, + is_background_thread); + } else if (is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + extents_npages_get(extents), + arena_decay_backlog_npages_limit(decay), + is_background_thread); + } + + return advance_epoch; +} + +static ssize_t +arena_decay_ms_get(arena_decay_t *decay) { + return arena_decay_ms_read(decay); +} + +ssize_t +arena_dirty_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_dirty); +} + +ssize_t +arena_muzzy_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_muzzy); +} + +static bool +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + + malloc_mutex_lock(tsdn, &decay->mtx); + /* + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. + */ + arena_decay_reinit(decay, decay_ms); + arena_maybe_decay(tsdn, arena, decay, extents, false); + malloc_mutex_unlock(tsdn, &decay->mtx); + + return false; +} + +bool +arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, decay_ms); +} + +bool +arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, decay_ms); +} + +static size_t +arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, + size_t npages_decay_max, extent_list_t *decay_extents) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Stash extents according to npages_limit. 
*/ + size_t nstashed = 0; + extent_t *extent; + while (nstashed < npages_decay_max && + (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, + npages_limit)) != NULL) { + extent_list_append(decay_extents, extent); + nstashed += extent_size_get(extent) >> LG_PAGE; + } + return nstashed; +} + +static size_t +arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, + bool all, extent_list_t *decay_extents, bool is_background_thread) { + size_t nmadvise, nunmapped; + size_t npurged; + + if (config_stats) { + nmadvise = 0; + nunmapped = 0; + } + npurged = 0; + + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + for (extent_t *extent = extent_list_first(decay_extents); extent != + NULL; extent = extent_list_first(decay_extents)) { + if (config_stats) { + nmadvise++; + } + size_t npages = extent_size_get(extent) >> LG_PAGE; + npurged += npages; + extent_list_remove(decay_extents, extent); + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (!all && muzzy_decay_ms != 0 && + !extent_purge_lazy_wrapper(tsdn, arena, + r_extent_hooks, extent, 0, + extent_size_get(extent))) { + extents_dalloc(tsdn, arena, r_extent_hooks, + &arena->extents_muzzy, extent); + arena_background_thread_inactivity_check(tsdn, + arena, is_background_thread); + break; + } + /* Fall through. */ + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, + extent); + if (config_stats) { + nunmapped += npages; + } + break; + case extent_state_retained: + default: + not_reached(); + } + } + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, + 1); + arena_stats_add_u64(tsdn, &arena->stats, + &decay->stats->nmadvise, nmadvise); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, + npurged); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + nunmapped << LG_PAGE); + arena_stats_unlock(tsdn, &arena->stats); + } + + return npurged; +} + +/* + * npages_limit: Decay at most npages_decay_max pages without violating the + * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper + * bound on number of pages in order to prevent unbounded growth (namely in + * stashed), otherwise unbounded new pages could be added to extents during the + * current decay run, so that the purging thread never finishes. 
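+ * For example, with hypothetical values npages_limit = 1000 and npages_decay_max = 200, a single call purges at most 200 pages and never leaves extents holding fewer than 1000 pages.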
+ */ +static void +arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, + bool is_background_thread) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + if (decay->purging) { + return; + } + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); + + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + + extent_list_t decay_extents; + extent_list_init(&decay_extents); + + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, + npages_limit, npages_decay_max, &decay_extents); + if (npurge != 0) { + size_t npurged = arena_decay_stashed(tsdn, arena, + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); + assert(npurged == npurge); + } + + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; +} + +static bool +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread, bool all) { + if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + extents_npages_get(extents), is_background_thread); + malloc_mutex_unlock(tsdn, &decay->mtx); + + return false; + } + + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* No need to wait if another thread is in progress. */ + return true; + } + + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, + is_background_thread); + size_t npages_new; + if (epoch_advanced) { + /* Backlog is updated on epoch advance. */ + npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + } + malloc_mutex_unlock(tsdn, &decay->mtx); + + if (have_background_thread && background_thread_enabled() && + epoch_advanced && !is_background_thread) { + background_thread_interval_check(tsdn, arena, decay, + npages_new); + } + + return false; +} + +static bool +arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, is_background_thread, all); +} + +static bool +arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, is_background_thread, all); +} + +void +arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { + return; + } + arena_decay_muzzy(tsdn, arena, is_background_thread, all); +} + +static void +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { + arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +} + +static void +arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) > 0); + extent_heap_insert(&bin->slabs_nonfull, slab); + if (config_stats) { + bin->stats.nonfull_slabs++; + } +} + +static void +arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { + extent_heap_remove(&bin->slabs_nonfull, slab); + if (config_stats) { + bin->stats.nonfull_slabs--; + } +} + +static extent_t * +arena_bin_slabs_nonfull_tryget(bin_t *bin) { + extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + if (slab == NULL) { + return NULL; + } + if (config_stats) { + bin->stats.reslabs++; + 
bin->stats.nonfull_slabs--; + } + return slab; +} + +static void +arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) == 0); + /* + * Tracking extents is required by arena_reset, which is not allowed + * for auto arenas. Bypass this step to avoid touching the extent + * linkage (often results in cache misses) for auto arenas. + */ + if (arena_is_auto(arena)) { + return; + } + extent_list_append(&bin->slabs_full, slab); +} + +static void +arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { + if (arena_is_auto(arena)) { + return; + } + extent_list_remove(&bin->slabs_full, slab); +} + +static void +arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) { + extent_t *slab; + + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (bin->slabcur != NULL) { + slab = bin->slabcur; + bin->slabcur = NULL; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + for (slab = extent_list_first(&bin->slabs_full); slab != NULL; + slab = extent_list_first(&bin->slabs_full)) { + arena_bin_slabs_full_remove(arena, bin, slab); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curslabs = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); +} + +void +arena_reset(tsd_t *tsd, arena_t *arena) { + /* + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. + */ + + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + + for (extent_t *extent = extent_list_first(&arena->large); extent != + NULL; extent = extent_list_first(&arena->large)) { + void *ptr = extent_base_get(extent); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != SC_NSIZES); + + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(alloc_ctx.szind); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + } + /* Remove large allocation from prof sample set. */ + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, &alloc_ctx); + } + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + + /* Bins. 
*/ + for (unsigned i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + arena_bin_reset(tsd, arena, + &arena->bins[i].bin_shards[j]); + } + } + + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); +} + +static void +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { + /* + * Iterate over the retained extents and destroy them. This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. + */ + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, &extent_hooks, + &arena->extents_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); + } +} + +void +arena_destroy(tsd_t *tsd, arena_t *arena) { + assert(base_ind_get(arena->base) >= narenas_auto); + assert(arena_nthreads_get(arena, false) == 0); + assert(arena_nthreads_get(arena, true) == 0); + + /* + * No allocations have occurred since arena_reset() was called. + * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached + * extents, so only retained extents may remain. + */ + assert(extents_npages_get(&arena->extents_dirty) == 0); + assert(extents_npages_get(&arena->extents_muzzy) == 0); + + /* Deallocate retained memory. */ + arena_destroy_retained(tsd_tsdn(tsd), arena); + + /* + * Remove the arena pointer from the arenas array. We rely on the fact + * that there is no way for the application to get a dirty read from the + * arenas array unless there is an inherent race in the application + * involving access of an arena being concurrently destroyed. The + * application must synchronize knowledge of the arena's validity, so as + * long as we use an atomic write to update the arenas array, the + * application will get a clean read any time after it synchronizes + * knowledge that the arena is no longer valid. + */ + arena_set(base_ind_get(arena->base), NULL); + + /* + * Destroy the base allocator, which manages all metadata ever mapped by + * this arena. 
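+ * Note that the arena_t itself was allocated from this base (via base_alloc() in arena_new()), so the arena structure is freed along with it.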
+ */ + base_delete(tsd_tsdn(tsd), arena->base); +} + +static extent_t * +arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, + szind_t szind) { + extent_t *slab; + bool zero, commit; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + zero = false; + commit = true; + slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, + bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); + + if (config_stats && slab != NULL) { + arena_stats_mapped_add(tsdn, &arena->stats, + bin_info->slab_size); + } + + return slab; +} + +static extent_t * +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, + const bin_info_t *bin_info) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + szind_t szind = sz_size2index(bin_info->reg_size); + bool zero = false; + bool commit = true; + extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, + binind, &zero, &commit); + if (slab == NULL && arena_may_have_muzzy(arena)) { + slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, + true, binind, &zero, &commit); + } + if (slab == NULL) { + slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, + bin_info, szind); + if (slab == NULL) { + return NULL; + } + } + assert(extent_slab_get(slab)); + + /* Initialize slab internals. */ + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + extent_nfree_binshard_set(slab, bin_info->nregs, binshard); + bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); + + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); + + return slab; +} + +static extent_t * +arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, unsigned binshard) { + extent_t *slab; + const bin_info_t *bin_info; + + /* Look for a usable slab. */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + /* No existing slabs have any space available. */ + + bin_info = &bin_infos[binind]; + + /* Allocate a new slab. */ + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info); + /********************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (slab != NULL) { + if (config_stats) { + bin->stats.nslabs++; + bin->stats.curslabs++; + } + return slab; + } + + /* + * arena_slab_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + + return NULL; +} + +/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ +static void * +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, unsigned binshard) { + const bin_info_t *bin_info; + extent_t *slab; + + bin_info = &bin_infos[binind]; + if (!arena_is_auto(arena) && bin->slabcur != NULL) { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard); + if (bin->slabcur != NULL) { + /* + * Another thread updated slabcur while this one ran without the + * bin lock in arena_bin_nonfull_slab_get(). 
+ */ + if (extent_nfree_get(bin->slabcur) > 0) { + void *ret = arena_slab_reg_alloc(bin->slabcur, + bin_info); + if (slab != NULL) { + /* + * arena_slab_alloc() may have allocated slab, + * or it may have been pulled from + * slabs_nonfull. Therefore it is unsafe to + * make any assumptions about how slab has + * previously been used, and + * arena_bin_lower_slab() must be called, as if + * a region were just deallocated from the slab. + */ + if (extent_nfree_get(slab) == bin_info->nregs) { + arena_dalloc_bin_slab(tsdn, arena, slab, + bin); + } else { + arena_bin_lower_slab(tsdn, arena, slab, + bin); + } + } + return ret; + } + + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + + if (slab == NULL) { + return NULL; + } + bin->slabcur = slab; + + assert(extent_nfree_get(bin->slabcur) > 0); + + return arena_slab_reg_alloc(slab, bin_info); +} + +/* Choose a bin shard and return the locked bin. */ +bin_t * +arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, + unsigned *binshard) { + bin_t *bin; + if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { + *binshard = 0; + } else { + *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; + } + assert(*binshard < bin_infos[binind].n_shards); + bin = &arena->bins[binind].bin_shards[*binshard]; + malloc_mutex_lock(tsdn, &bin->lock); + + return bin; +} + +void +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { + unsigned i, nfill, cnt; + + assert(tbin->ncached == 0); + + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { + prof_idump(tsdn); + } + + unsigned binshard; + bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); + + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + tcache->lg_fill_div[binind]); i < nfill; i += cnt) { + extent_t *slab; + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > + 0) { + unsigned tofill = nfill - i; + cnt = tofill < extent_nfree_get(slab) ? + tofill : extent_nfree_get(slab); + arena_slab_reg_alloc_batch( + slab, &bin_infos[binind], cnt, + tbin->avail - nfill + i); + } else { + cnt = 1; + void *ptr = arena_bin_malloc_hard(tsdn, arena, bin, + binind, binshard); + /* + * OOM. tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved just before tbin->avail before bailing out. + */ + if (ptr == NULL) { + if (i > 0) { + memmove(tbin->avail - i, + tbin->avail - nfill, + i * sizeof(void *)); + } + break; + } + /* Insert such that low regions get used first. 
*/ + *(tbin->avail - nfill + i) = ptr; + } + if (config_fill && unlikely(opt_junk_alloc)) { + for (unsigned j = 0; j < cnt; j++) { + void* ptr = *(tbin->avail - nfill + i + j); + arena_alloc_junk_small(ptr, &bin_infos[binind], + true); + } + } + } + if (config_stats) { + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; + bin->stats.nfills++; + tbin->tstats.nrequests = 0; + } + malloc_mutex_unlock(tsdn, &bin->lock); + tbin->ncached = i; + arena_decay_tick(tsdn, arena); +} + +void +arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { + if (!zero) { + memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); + } +} + +static void +arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { + memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); +} +arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = + arena_dalloc_junk_small_impl; + +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { + void *ret; + bin_t *bin; + size_t usize; + extent_t *slab; + + assert(binind < SC_NBINS); + usize = sz_index2size(binind); + unsigned binshard; + bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); + + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { + ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); + } else { + ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); + } + + if (ret == NULL) { + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; + } + + if (config_stats) { + bin->stats.nmalloc++; + bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &bin_infos[binind], false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], + true); + } + memset(ret, 0, usize); + } + + arena_decay_tick(tsdn, arena); + return ret; +} + +void * +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) { + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size); + } + if (unlikely(arena == NULL)) { + return NULL; + } + + if (likely(size <= SC_SMALL_MAXCLASS)) { + return arena_malloc_small(tsdn, arena, ind, zero); + } + return large_malloc(tsdn, arena, sz_index2size(ind), zero); +} + +void * +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) { + void *ret; + + if (usize <= SC_SMALL_MAXCLASS + && (alignment < PAGE + || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special slab placement. 
*/ + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } else { + if (likely(alignment <= CACHELINE)) { + ret = large_malloc(tsdn, arena, usize, zero); + } else { + ret = large_palloc(tsdn, arena, usize, alignment, zero); + } + } + return ret; +} + +void +arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { + cassert(config_prof); + assert(ptr != NULL); + assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); + assert(usize <= SC_SMALL_MAXCLASS); + + if (config_opt_safety_checks) { + safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); + } + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + arena_t *arena = extent_arena_get(extent); + + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + szind, false); + + prof_accum_cancel(tsdn, &arena->prof_accum, usize); + + assert(isalloc(tsdn, ptr) == usize); +} + +static size_t +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { + cassert(config_prof); + assert(ptr != NULL); + + extent_szind_set(extent, SC_NBINS); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + SC_NBINS, false); + + assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); + + return SC_LARGE_MINCLASS; +} + +void +arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path) { + cassert(config_prof); + assert(opt_prof); + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize = extent_usize_get(extent); + size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr); + if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { + /* + * Currently, we only do redzoning for small sampled + * allocations. + */ + assert(bumped_usize == SC_LARGE_MINCLASS); + safety_check_verify_redzone(ptr, usize, bumped_usize); + } + if (bumped_usize <= tcache_maxclass && tcache != NULL) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + sz_size2index(bumped_usize), slow_path); + } else { + large_dalloc(tsdn, extent); + } +} + +static void +arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { + /* Dissociate slab from bin. */ + if (slab == bin->slabcur) { + bin->slabcur = NULL; + } else { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + + /* + * The following block's conditional is necessary because if the + * slab only contains one region, then it never gets inserted + * into the non-full slabs heap. + */ + if (bin_info->nregs == 1) { + arena_bin_slabs_full_remove(arena, bin, slab); + } else { + arena_bin_slabs_nonfull_remove(bin, slab); + } + } +} + +static void +arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(slab != bin->slabcur); + + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + arena_slab_dalloc(tsdn, arena, slab); + /****************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + bin->stats.curslabs--; + } +} + +static void +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(extent_nfree_get(slab) > 0); + + /* + * Make sure that if bin->slabcur is non-NULL, it refers to the + * oldest/lowest non-full slab. 
It is okay to NULL slabcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * slab. + */ + if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + /* Switch slabcur. */ + if (extent_nfree_get(bin->slabcur) > 0) { + arena_bin_slabs_nonfull_insert(bin, bin->slabcur); + } else { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + } + bin->slabcur = slab; + if (config_stats) { + bin->stats.reslabs++; + } + } else { + arena_bin_slabs_nonfull_insert(bin, slab); + } +} + +static void +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, extent_t *slab, void *ptr, bool junked) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, bin_info); + } + + arena_slab_reg_dalloc(slab, slab_data, ptr); + unsigned nfree = extent_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab(tsdn, arena, slab, bin); + } else if (nfree == 1 && slab != bin->slabcur) { + arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); + } + + if (config_stats) { + bin->stats.ndalloc++; + bin->stats.curregs--; + } +} + +void +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind, extent_t *extent, void *ptr) { + arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, + true); +} + +static void +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { + szind_t binind = extent_szind_get(extent); + unsigned binshard = extent_binshard_get(extent); + bin_t *bin = &arena->bins[binind].bin_shards[binshard]; + + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, + false); + malloc_mutex_unlock(tsdn, &bin->lock); +} + +void +arena_dalloc_small(tsdn_t *tsdn, void *ptr) { + extent_t *extent = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(extent); + + arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_decay_tick(tsdn, arena); +} + +bool +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero, size_t *newsize) { + bool ret; + /* Calls with non-zero extra had to clamp extra. */ + assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); + + extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(size > SC_LARGE_MAXCLASS)) { + ret = true; + goto done; + } + + size_t usize_min = sz_s2u(size); + size_t usize_max = sz_s2u(size + extra); + if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min + <= SC_SMALL_MAXCLASS)) { + /* + * Avoid moving the allocation if the size class can be left the + * same. 
+ */ + assert(bin_infos[sz_size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SC_SMALL_MAXCLASS + || sz_size2index(usize_max) != sz_size2index(oldsize)) + && (size > oldsize || usize_max < oldsize)) { + ret = true; + goto done; + } + + arena_decay_tick(tsdn, extent_arena_get(extent)); + ret = false; + } else if (oldsize >= SC_LARGE_MINCLASS + && usize_max >= SC_LARGE_MINCLASS) { + ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); + } else { + ret = true; + } +done: + assert(extent == iealloc(tsdn, ptr)); + *newsize = extent_usize_get(extent); + + return ret; +} + +static void * +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { + return arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } + usize = sz_sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { + return NULL; + } + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); +} + +void * +arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache, + hook_ralloc_args_t *hook_args) { + size_t usize = sz_s2u(size); + if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { + return NULL; + } + + if (likely(usize <= SC_SMALL_MAXCLASS)) { + /* Try to avoid moving the allocation. */ + UNUSED size_t newsize; + if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, + &newsize)) { + hook_invoke_expand(hook_args->is_realloc + ? hook_expand_realloc : hook_expand_rallocx, + ptr, oldsize, usize, (uintptr_t)ptr, + hook_args->args); + return ptr; + } + } + + if (oldsize >= SC_LARGE_MINCLASS + && usize >= SC_LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, ptr, usize, + alignment, zero, tcache, hook_args); + } + + /* + * size and oldsize are different enough that we need to move the + * object. In that case, fall back to allocating new space and copying. + */ + void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, + zero, tcache); + if (ret == NULL) { + return NULL; + } + + hook_invoke_alloc(hook_args->is_realloc + ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, + hook_args->args); + hook_invoke_dalloc(hook_args->is_realloc + ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + size_t copysize = (usize < oldsize) ? 
usize : oldsize; + memcpy(ret, ptr, copysize); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return ret; +} + +dss_prec_t +arena_dss_prec_get(arena_t *arena) { + return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); +} + +bool +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +ssize_t +arena_dirty_decay_ms_default_get(void) { + return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); +} + +bool +arena_dirty_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} + +ssize_t +arena_muzzy_decay_ms_default_get(void) { + return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); +} + +bool +arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} + +bool +arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, + size_t *new_limit) { + assert(opt_retain); + + pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); + if (new_limit != NULL) { + size_t limit = *new_limit; + /* Grow no more than the new limit. */ + if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { + return true; + } + } + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); + if (old_limit != NULL) { + *old_limit = sz_pind2sz(arena->retain_grow_limit); + } + if (new_limit != NULL) { + arena->retain_grow_limit = new_ind; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); + + return false; +} + +unsigned +arena_nthreads_get(arena_t *arena, bool internal) { + return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); +} + +void +arena_nthreads_inc(arena_t *arena, bool internal) { + atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} + +void +arena_nthreads_dec(arena_t *arena, bool internal) { + atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} + +size_t +arena_extent_sn_next(arena_t *arena) { + return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); +} + +arena_t * +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + base_t *base; + unsigned i; + + if (ind == 0) { + base = b0get(); + } else { + base = base_new(tsdn, ind, extent_hooks); + if (base == NULL) { + return NULL; + } + } + + unsigned nbins_total = 0; + for (i = 0; i < SC_NBINS; i++) { + nbins_total += bin_infos[i].n_shards; + } + size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; + arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); + if (arena == NULL) { + goto label_error; + } + + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + arena->last_thd = NULL; + + if (config_stats) { + if (arena_stats_init(tsdn, &arena->stats)) { + goto label_error; + } + + ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", + WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + } + + if (config_prof) { + if (prof_accum_init(tsdn, &arena->prof_accum)) { + goto label_error; + } + } + + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena 
reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + atomic_store_zu(&arena->offset_state, config_debug ? ind : + (size_t)(uintptr_t)arena, ATOMIC_RELAXED); + } + + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); + + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), + ATOMIC_RELAXED); + + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); + + extent_list_init(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, "arena_large", + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. + */ + if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, + true)) { + goto label_error; + } + /* + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. + */ + if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, + false)) { + goto label_error; + } + /* + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. + */ + if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + false)) { + goto label_error; + } + + if (arena_decay_init(&arena->decay_dirty, + arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { + goto label_error; + } + if (arena_decay_init(&arena->decay_muzzy, + arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { + goto label_error; + } + + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); + arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS); + if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + extent_avail_new(&arena->extent_avail); + if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", + WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* Initialize bins. */ + uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t); + atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); + for (i = 0; i < SC_NBINS; i++) { + unsigned nshards = bin_infos[i].n_shards; + arena->bins[i].bin_shards = (bin_t *)bin_addr; + bin_addr += nshards * sizeof(bin_t); + for (unsigned j = 0; j < nshards; j++) { + bool err = bin_init(&arena->bins[i].bin_shards[j]); + if (err) { + goto label_error; + } + } + } + assert(bin_addr == (uintptr_t)arena + arena_size); + + arena->base = base; + /* Set arena before creating background threads. */ + arena_set(ind, arena); + + nstime_init(&arena->create_time, 0); + nstime_update(&arena->create_time); + + /* We don't support reentrancy for arena 0 bootstrapping. */ + if (ind != 0) { + /* + * If we're here, then arena 0 already exists, so bootstrapping + * is done enough that we should have tsd. 
+ */ + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); + if (test_hooks_arena_new_hook) { + test_hooks_arena_new_hook(); + } + post_reentrancy(tsdn_tsd(tsdn)); + } + + return arena; +label_error: + if (ind != 0) { + base_delete(tsdn, base); + } + return NULL; +} + +arena_t * +arena_choose_huge(tsd_t *tsd) { + /* huge_arena_ind can be 0 during init (will use a0). */ + if (huge_arena_ind == 0) { + assert(!malloc_initialized()); + } + + arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false); + if (huge_arena == NULL) { + /* Create the huge arena on demand. */ + assert(huge_arena_ind != 0); + huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true); + if (huge_arena == NULL) { + return NULL; + } + /* + * Purge eagerly for huge allocations, because: 1) number of + * huge allocations is usually small, which means ticker based + * decay is not reliable; and 2) less immediate reuse is + * expected for huge allocations. + */ + if (arena_dirty_decay_ms_default_get() > 0) { + arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); + } + if (arena_muzzy_decay_ms_default_get() > 0) { + arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); + } + } + + return huge_arena; +} + +bool +arena_init_huge(void) { + bool huge_enabled; + + /* The threshold should be large size class. */ + if (opt_oversize_threshold > SC_LARGE_MAXCLASS || + opt_oversize_threshold < SC_LARGE_MINCLASS) { + opt_oversize_threshold = 0; + oversize_threshold = SC_LARGE_MAXCLASS + PAGE; + huge_enabled = false; + } else { + /* Reserve the index for the huge arena. */ + huge_arena_ind = narenas_total_get(); + oversize_threshold = opt_oversize_threshold; + huge_enabled = true; + } + + return huge_enabled; +} + +bool +arena_is_huge(unsigned arena_ind) { + if (huge_arena_ind == 0) { + return false; + } + return (arena_ind == huge_arena_ind); +} + +void +arena_boot(sc_data_t *sc_data) { + arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); + arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); + for (unsigned i = 0; i < SC_NBINS; i++) { + sc_t *sc = &sc_data->sc[i]; + div_init(&arena_binind_div_info[i], + (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); + } +} + +void +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); +} + +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { + if (config_stats) { + malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); + } +} + +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); +} + +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { + extents_prefork(tsdn, &arena->extents_dirty); + extents_prefork(tsdn, &arena->extents_muzzy); + extents_prefork(tsdn, &arena->extents_retained); +} + +void +arena_prefork4(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); +} + +void +arena_prefork5(tsdn_t *tsdn, arena_t *arena) { + base_prefork(tsdn, arena->base); +} + +void +arena_prefork6(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} + +void +arena_prefork7(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + bin_prefork(tsdn, &arena->bins[i].bin_shards[j]); + } + } +} + +void +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { + unsigned i; + + for (i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + bin_postfork_parent(tsdn, + 
&arena->bins[i].bin_shards[j]); + } + } + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); + base_postfork_parent(tsdn, arena->base); + malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); + extents_postfork_parent(tsdn, &arena->extents_dirty); + extents_postfork_parent(tsdn, &arena->extents_muzzy); + extents_postfork_parent(tsdn, &arena->extents_retained); + malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); + } +} + +void +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { + unsigned i; + + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, + tcache->bins_small, tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + } + } + + for (i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]); + } + } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); + base_postfork_child(tsdn, arena->base); + malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); + extents_postfork_child(tsdn, &arena->extents_dirty); + extents_postfork_child(tsdn, &arena->extents_muzzy); + extents_postfork_child(tsdn, &arena->extents_retained); + malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); + } +} diff --git a/deps/jemalloc/src/background_thread.c b/deps/jemalloc/src/background_thread.c new file mode 100644 index 0000000..57b9b25 --- /dev/null +++ b/deps/jemalloc/src/background_thread.c @@ -0,0 +1,939 @@ +#define JEMALLOC_BACKGROUND_THREAD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS + +/******************************************************************************/ +/* Data. */ + +/* This option should be opt-in only. */ +#define BACKGROUND_THREAD_DEFAULT false +/* Read-only after initialization. */ +bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; +size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1; + +/* Used for thread creation, termination and stats. */ +malloc_mutex_t background_thread_lock; +/* Indicates global state. Atomic because decay reads this w/o locking. */ +atomic_b_t background_thread_enabled_state; +size_t n_background_threads; +size_t max_background_threads; +/* Thread info per-index. 
*/ +background_thread_info_t *background_thread_info; + +/******************************************************************************/ + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_wrapper_init(void) { +#ifdef JEMALLOC_LAZY_LOCK + if (!isthreaded) { + isthreaded = true; + } +#endif +} + +int +pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *__restrict arg) { + pthread_create_wrapper_init(); + + return pthread_create_fptr(thread, attr, start_routine, arg); +} +#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ + +#ifndef JEMALLOC_BACKGROUND_THREAD +#define NOT_REACHED { not_reached(); } +bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED +bool background_threads_enable(tsd_t *tsd) NOT_REACHED +bool background_threads_disable(tsd_t *tsd) NOT_REACHED +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) NOT_REACHED +void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED +void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats) NOT_REACHED +void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED +#undef NOT_REACHED +#else + +static bool background_thread_enabled_at_fork; + +static void +background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { + background_thread_wakeup_time_set(tsdn, info, 0); + info->npages_to_purge_new = 0; + if (config_stats) { + info->tot_n_runs = 0; + nstime_init(&info->tot_sleep_time, 0); + } +} + +static inline bool +set_current_thread_affinity(int cpu) { +#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); + + return (ret != 0); +#else + return false; +#endif +} + +/* Threshold for determining when to wake up the background thread. */ +#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) +#define BILLION UINT64_C(1000000000) +/* Minimal sleep interval 100 ms. */ +#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) + +static inline size_t +decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { + size_t i; + uint64_t sum = 0; + for (i = 0; i < interval; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + for (; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); + } + + return (size_t)(sum >> SMOOTHSTEP_BFP); +} + +static uint64_t +arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, + extents_t *extents) { + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* Use minimal interval if decay is contended. */ + return BACKGROUND_THREAD_MIN_INTERVAL_NS; + } + + uint64_t interval; + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. 
*/ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + size_t npages = extents_npages_get(extents); + if (npages == 0) { + unsigned i; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + if (decay->backlog[i] > 0) { + break; + } + } + if (i == SMOOTHSTEP_NSTEPS) { + /* No dirty pages recorded. Sleep indefinitely. */ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + } + if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { + /* Use max interval. */ + interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; + goto label_done; + } + + size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; + size_t ub = SMOOTHSTEP_NSTEPS; + /* Minimal 2 intervals to ensure reaching next epoch deadline. */ + lb = (lb < 2) ? 2 : lb; + if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || + (lb + 2 > ub)) { + interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; + goto label_done; + } + + assert(lb + 2 <= ub); + size_t npurge_lb, npurge_ub; + npurge_lb = decay_npurge_after_interval(decay, lb); + if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * lb; + goto label_done; + } + npurge_ub = decay_npurge_after_interval(decay, ub); + if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * ub; + goto label_done; + } + + unsigned n_search = 0; + size_t target, npurge; + while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) + && (lb + 2 < ub)) { + target = (lb + ub) / 2; + npurge = decay_npurge_after_interval(decay, target); + if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + ub = target; + npurge_ub = npurge; + } else { + lb = target; + npurge_lb = npurge; + } + assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); + } + interval = decay_interval_ns * (ub + lb) / 2; +label_done: + interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? + BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; + malloc_mutex_unlock(tsdn, &decay->mtx); + + return interval; +} + +/* Compute purge interval for background threads. */ +static uint64_t +arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { + uint64_t i1, i2; + i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, + &arena->extents_dirty); + if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + return i1; + } + i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, + &arena->extents_muzzy); + + return i1 < i2 ? i1 : i2; +} + +static void +background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t interval) { + if (config_stats) { + info->tot_n_runs++; + } + info->npages_to_purge_new = 0; + + struct timeval tv; + /* Specific clock required by timedwait. */ + gettimeofday(&tv, NULL); + nstime_t before_sleep; + nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); + + int ret; + if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { + assert(background_thread_indefinite_sleep(info)); + ret = pthread_cond_wait(&info->cond, &info->mtx.lock); + assert(ret == 0); + } else { + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && + interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); + /* We need malloc clock (can be different from tv). 
*/ + nstime_t next_wakeup; + nstime_init(&next_wakeup, 0); + nstime_update(&next_wakeup); + nstime_iadd(&next_wakeup, interval); + assert(nstime_ns(&next_wakeup) < + BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set(tsdn, info, + nstime_ns(&next_wakeup)); + + nstime_t ts_wakeup; + nstime_copy(&ts_wakeup, &before_sleep); + nstime_iadd(&ts_wakeup, interval); + struct timespec ts; + ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); + ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); + + assert(!background_thread_indefinite_sleep(info)); + ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); + assert(ret == ETIMEDOUT || ret == 0); + background_thread_wakeup_time_set(tsdn, info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + } + if (config_stats) { + gettimeofday(&tv, NULL); + nstime_t after_sleep; + nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); + if (nstime_compare(&after_sleep, &before_sleep) > 0) { + nstime_subtract(&after_sleep, &before_sleep); + nstime_add(&info->tot_sleep_time, &after_sleep); + } + } +} + +static bool +background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { + if (unlikely(info->state == background_thread_paused)) { + malloc_mutex_unlock(tsdn, &info->mtx); + /* Wait on global lock to update status. */ + malloc_mutex_lock(tsdn, &background_thread_lock); + malloc_mutex_unlock(tsdn, &background_thread_lock); + malloc_mutex_lock(tsdn, &info->mtx); + return true; + } + + return false; +} + +static inline void +background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { + uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + unsigned narenas = narenas_total_get(); + + for (unsigned i = ind; i < narenas; i += max_background_threads) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + arena_decay(tsdn, arena, true, false); + if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + /* Min interval will be used. */ + continue; + } + uint64_t interval = arena_decay_compute_purge_interval(tsdn, + arena); + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); + if (min_interval > interval) { + min_interval = interval; + } + } + background_thread_sleep(tsdn, info, min_interval); +} + +static bool +background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { + if (info == &background_thread_info[0]) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), + &background_thread_lock); + } else { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), + &background_thread_lock); + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + bool has_thread; + assert(info->state != background_thread_paused); + if (info->state == background_thread_started) { + has_thread = true; + info->state = background_thread_stopped; + pthread_cond_signal(&info->cond); + } else { + has_thread = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + if (!has_thread) { + post_reentrancy(tsd); + return false; + } + void *ret; + if (pthread_join(info->thread, &ret)) { + post_reentrancy(tsd); + return true; + } + assert(ret == NULL); + n_background_threads--; + post_reentrancy(tsd); + + return false; +} + +static void *background_thread_entry(void *ind_arg); + +static int +background_thread_create_signals_masked(pthread_t *thread, + const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { + /* + * Mask signals during thread creation so that the thread inherits + * an empty signal set. 
+ */ + sigset_t set; + sigfillset(&set); + sigset_t oldset; + int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); + if (mask_err != 0) { + return mask_err; + } + int create_err = pthread_create_wrapper(thread, attr, start_routine, + arg); + /* + * Restore the signal mask. Failure to restore the signal mask here + * changes program behavior. + */ + int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + if (restore_err != 0) { + malloc_printf(": background thread creation " + "failed (%d), and signal mask restoration failed " + "(%d)\n", create_err, restore_err); + if (opt_abort) { + abort(); + } + } + return create_err; +} + +static bool +check_background_thread_creation(tsd_t *tsd, unsigned *n_created, + bool *created_threads) { + bool ret = false; + if (likely(*n_created == n_background_threads)) { + return ret; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx); + for (unsigned i = 1; i < max_background_threads; i++) { + if (created_threads[i]) { + continue; + } + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + /* + * In case of the background_thread_paused state because of + * arena reset, delay the creation. + */ + bool create = (info->state == background_thread_started); + malloc_mutex_unlock(tsdn, &info->mtx); + if (!create) { + continue; + } + + pre_reentrancy(tsd, NULL); + int err = background_thread_create_signals_masked(&info->thread, + NULL, background_thread_entry, (void *)(uintptr_t)i); + post_reentrancy(tsd); + + if (err == 0) { + (*n_created)++; + created_threads[i] = true; + } else { + malloc_printf(": background thread " + "creation failed (%d)\n", err); + if (opt_abort) { + abort(); + } + } + /* Return to restart the loop since we unlocked. */ + ret = true; + break; + } + malloc_mutex_lock(tsdn, &background_thread_info[0].mtx); + + return ret; +} + +static void +background_thread0_work(tsd_t *tsd) { + /* Thread0 is also responsible for launching / terminating threads. */ + VARIABLE_ARRAY(bool, created_threads, max_background_threads); + unsigned i; + for (i = 1; i < max_background_threads; i++) { + created_threads[i] = false; + } + /* Start working, and create more threads when asked. */ + unsigned n_created = 1; + while (background_thread_info[0].state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + &background_thread_info[0])) { + continue; + } + if (check_background_thread_creation(tsd, &n_created, + (bool *)&created_threads)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), + &background_thread_info[0], 0); + } + + /* + * Shut down other threads at exit. Note that the ctl thread is holding + * the global background_thread mutex (and is waiting) for us. + */ + assert(!background_thread_enabled()); + for (i = 1; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + assert(info->state != background_thread_paused); + if (created_threads[i]) { + background_threads_disable_single(tsd, info); + } else { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->state != background_thread_stopped) { + /* The thread was not created. 
*/ + assert(info->state == + background_thread_started); + n_background_threads--; + info->state = background_thread_stopped; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } + background_thread_info[0].state = background_thread_stopped; + assert(n_background_threads == 1); +} + +static void +background_work(tsd_t *tsd, unsigned ind) { + background_thread_info_t *info = &background_thread_info[ind]; + + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + if (ind == 0) { + background_thread0_work(tsd); + } else { + while (info->state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + info)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), info, ind); + } + } + assert(info->state == background_thread_stopped); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); +} + +static void * +background_thread_entry(void *ind_arg) { + unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; + assert(thread_ind < max_background_threads); +#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); +#elif defined(__FreeBSD__) + pthread_set_name_np(pthread_self(), "jemalloc_bg_thd"); +#endif + if (opt_percpu_arena != percpu_arena_disabled) { + set_current_thread_affinity((int)thread_ind); + } + /* + * Start periodic background work. We use internal tsd which avoids + * side effects, for example triggering new arena creation (which in + * turn triggers another background thread creation). + */ + background_work(tsd_internal_fetch(), thread_ind); + assert(pthread_equal(pthread_self(), + background_thread_info[thread_ind].thread)); + + return NULL; +} + +static void +background_thread_init(tsd_t *tsd, background_thread_info_t *info) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + info->state = background_thread_started; + background_thread_info_init(tsd_tsdn(tsd), info); + n_background_threads++; +} + +static bool +background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) { + assert(have_background_thread); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* We create at most NCPUs threads. */ + size_t thread_ind = arena_ind % max_background_threads; + background_thread_info_t *info = &background_thread_info[thread_ind]; + + bool need_new_thread; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + need_new_thread = background_thread_enabled() && + (info->state == background_thread_stopped); + if (need_new_thread) { + background_thread_init(tsd, info); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (!need_new_thread) { + return false; + } + if (arena_ind != 0) { + /* Threads are created asynchronously by Thread 0. */ + background_thread_info_t *t0 = &background_thread_info[0]; + malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); + assert(t0->state == background_thread_started); + pthread_cond_signal(&t0->cond); + malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); + + return false; + } + + pre_reentrancy(tsd, NULL); + /* + * To avoid complications (besides reentrancy), create internal + * background threads with the underlying pthread_create. 
+ */ + int err = background_thread_create_signals_masked(&info->thread, NULL, + background_thread_entry, (void *)thread_ind); + post_reentrancy(tsd); + + if (err != 0) { + malloc_printf(": arena 0 background thread creation " + "failed (%d)\n", err); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_stopped; + n_background_threads--; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + return true; + } + + return false; +} + +/* Create a new background thread if needed. */ +bool +background_thread_create(tsd_t *tsd, unsigned arena_ind) { + assert(have_background_thread); + + bool ret; + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + ret = background_thread_create_locked(tsd, arena_ind); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + + return ret; +} + +bool +background_threads_enable(tsd_t *tsd) { + assert(n_background_threads == 0); + assert(background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + VARIABLE_ARRAY(bool, marked, max_background_threads); + unsigned i, nmarked; + for (i = 0; i < max_background_threads; i++) { + marked[i] = false; + } + nmarked = 0; + /* Thread 0 is required and created at the end. */ + marked[0] = true; + /* Mark the threads we need to create for thread 0. */ + unsigned n = narenas_total_get(); + for (i = 1; i < n; i++) { + if (marked[i % max_background_threads] || + arena_get(tsd_tsdn(tsd), i, false) == NULL) { + continue; + } + background_thread_info_t *info = &background_thread_info[ + i % max_background_threads]; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + assert(info->state == background_thread_stopped); + background_thread_init(tsd, info); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + marked[i % max_background_threads] = true; + if (++nmarked == max_background_threads) { + break; + } + } + + return background_thread_create_locked(tsd, 0); +} + +bool +background_threads_disable(tsd_t *tsd) { + assert(!background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* Thread 0 will be responsible for terminating other threads. */ + if (background_threads_disable_single(tsd, + &background_thread_info[0])) { + return true; + } + assert(n_background_threads == 0); + + return false; +} + +/* Check if we need to signal the background thread early. */ +void +background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) { + background_thread_info_t *info = arena_background_thread_info_get( + arena); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Background thread may hold the mutex for a long period of + * time. We'd like to avoid the variance on application + * threads. So keep this non-blocking, and leave the work to a + * future epoch. + */ + return; + } + + if (info->state != background_thread_started) { + goto label_done; + } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + goto label_done; + } + + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. 
*/ + goto label_done_unlock2; + } + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + + nstime_t diff; + nstime_init(&diff, background_thread_wakeup_time_get(info)); + if (nstime_compare(&diff, &decay->epoch) <= 0) { + goto label_done_unlock2; + } + nstime_subtract(&diff, &decay->epoch); + if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { + goto label_done_unlock2; + } + + if (npages_new > 0) { + size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); + /* + * Compute how many new pages we would need to purge by the next + * wakeup, which is used to determine if we should signal the + * background thread. + */ + uint64_t npurge_new; + if (n_epoch >= SMOOTHSTEP_NSTEPS) { + npurge_new = npages_new; + } else { + uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; + assert(h_steps_max >= + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new = npages_new * (h_steps_max - + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new >>= SMOOTHSTEP_BFP; + } + info->npages_to_purge_new += npurge_new; + } + + bool should_signal; + if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + should_signal = true; + } else if (unlikely(background_thread_indefinite_sleep(info)) && + (extents_npages_get(&arena->extents_dirty) > 0 || + extents_npages_get(&arena->extents_muzzy) > 0 || + info->npages_to_purge_new > 0)) { + should_signal = true; + } else { + should_signal = false; + } + + if (should_signal) { + info->npages_to_purge_new = 0; + pthread_cond_signal(&info->cond); + } +label_done_unlock2: + malloc_mutex_unlock(tsdn, &decay->mtx); +label_done: + malloc_mutex_unlock(tsdn, &info->mtx); +} + +void +background_thread_prefork0(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &background_thread_lock); + background_thread_enabled_at_fork = background_thread_enabled(); +} + +void +background_thread_prefork1(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); + } +} + +void +background_thread_postfork_parent(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_parent(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_parent(tsdn, &background_thread_lock); +} + +void +background_thread_postfork_child(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_child(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_child(tsdn, &background_thread_lock); + if (!background_thread_enabled_at_fork) { + return; + } + + /* Clear background_thread state (reset to disabled for child). 
*/
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ n_background_threads = 0;
+ background_thread_enabled_set(tsdn, false);
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ malloc_mutex_lock(tsdn, &info->mtx);
+ info->state = background_thread_stopped;
+ int ret = pthread_cond_init(&info->cond, NULL);
+ assert(ret == 0);
+ background_thread_info_init(tsdn, info);
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+}
+
+bool
+background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
+ assert(config_stats);
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ if (!background_thread_enabled()) {
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ return true;
+ }
+
+ stats->num_threads = n_background_threads;
+ uint64_t num_runs = 0;
+ nstime_init(&stats->run_interval, 0);
+ for (unsigned i = 0; i < max_background_threads; i++) {
+ background_thread_info_t *info = &background_thread_info[i];
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+ * Each background thread run may take a long time;
+ * avoid waiting on the stats if the thread is active.
+ */
+ continue;
+ }
+ if (info->state != background_thread_stopped) {
+ num_runs += info->tot_n_runs;
+ nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ }
+ malloc_mutex_unlock(tsdn, &info->mtx);
+ }
+ stats->num_runs = num_runs;
+ if (num_runs > 0) {
+ nstime_idivide(&stats->run_interval, num_runs);
+ }
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+
+ return false;
+}
+
+#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
+#undef BILLION
+#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
+
+#ifdef JEMALLOC_HAVE_DLSYM
+#include <dlfcn.h>
+#endif
+
+static bool
+pthread_create_fptr_init(void) {
+ if (pthread_create_fptr != NULL) {
+ return false;
+ }
+ /*
+ * Try the next symbol first, because 1) when use lazy_lock we have a
+ * wrapper for pthread_create; and 2) application may define its own
+ * wrapper as well (and can call malloc within the wrapper).
+ */
+#ifdef JEMALLOC_HAVE_DLSYM
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+#else
+ pthread_create_fptr = NULL;
+#endif
+ if (pthread_create_fptr == NULL) {
+ if (config_lazy_lock) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ } else {
+ /* Fall back to the default symbol. */
+ pthread_create_fptr = pthread_create;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * When lazy lock is enabled, we need to make sure setting isthreaded before
+ * taking any background_thread locks. This is called early in ctl (instead of
+ * wait for the pthread_create calls to trigger) because the mutex is required
+ * before creating background threads.
+ */ +void +background_thread_ctl_init(tsdn_t *tsdn) { + malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + pthread_create_fptr_init(); + pthread_create_wrapper_init(); +#endif +} + +#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ + +bool +background_thread_boot0(void) { + if (!have_background_thread && opt_background_thread) { + malloc_printf(": option background_thread currently " + "supports pthread only\n"); + return true; + } +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + if ((config_lazy_lock || opt_background_thread) && + pthread_create_fptr_init()) { + return true; + } +#endif + return false; +} + +bool +background_thread_boot1(tsdn_t *tsdn) { +#ifdef JEMALLOC_BACKGROUND_THREAD + assert(have_background_thread); + assert(narenas_total_get() > 0); + + if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) { + opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD; + } + max_background_threads = opt_max_background_threads; + + background_thread_enabled_set(tsdn, opt_background_thread); + if (malloc_mutex_init(&background_thread_lock, + "background_thread_global", + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + malloc_mutex_rank_exclusive)) { + return true; + } + + background_thread_info = (background_thread_info_t *)base_alloc(tsdn, + b0get(), opt_max_background_threads * + sizeof(background_thread_info_t), CACHELINE); + if (background_thread_info == NULL) { + return true; + } + + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + /* Thread mutex is rank_inclusive because of thread0. */ + if (malloc_mutex_init(&info->mtx, "background_thread", + WITNESS_RANK_BACKGROUND_THREAD, + malloc_mutex_address_ordered)) { + return true; + } + if (pthread_cond_init(&info->cond, NULL)) { + return true; + } + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } +#endif + + return false; +} diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c new file mode 100644 index 0000000..f3c6166 --- /dev/null +++ b/deps/jemalloc/src/base.c @@ -0,0 +1,514 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sz.h" + +/******************************************************************************/ +/* Data. */ + +static base_t *b0; + +metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; + +const char *metadata_thp_mode_names[] = { + "disabled", + "auto", + "always" +}; + +/******************************************************************************/ + +static inline bool +metadata_thp_madvise(void) { + return (metadata_thp_enabled() && + (init_system_thp_mode == thp_mode_default)); +} + +static void * +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { + void *addr; + bool zero = true; + bool commit = true; + + /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ + assert(size == HUGEPAGE_CEILING(size)); + size_t alignment = HUGEPAGE; + if (extent_hooks == &extent_hooks_default) { + addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); + } else { + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? 
tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, + &zero, &commit, ind); + post_reentrancy(tsd); + } + + return addr; +} + +static void +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { + /* + * Cascade through dalloc, decommit, purge_forced, and purge_lazy, + * stopping at first success. This cascade is performed for consistency + * with the cascade in extent_dalloc_wrapper() because an application's + * custom hooks may not support e.g. dalloc. This function is only ever + * called as a side effect of arena destruction, so although it might + * seem pointless to do anything besides dalloc here, the application + * may in fact want the end state of all associated virtual memory to be + * in some consistent-but-allocated state. + */ + if (extent_hooks == &extent_hooks_default) { + if (!extent_dalloc_mmap(addr, size)) { + goto label_done; + } + if (!pages_decommit(addr, size)) { + goto label_done; + } + if (!pages_purge_forced(addr, size)) { + goto label_done; + } + if (!pages_purge_lazy(addr, size)) { + goto label_done; + } + /* Nothing worked. This should never happen. */ + not_reached(); + } else { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + if (extent_hooks->dalloc != NULL && + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->decommit != NULL && + !extent_hooks->decommit(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_forced != NULL && + !extent_hooks->purge_forced(extent_hooks, addr, size, 0, + size, ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_lazy != NULL && + !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + /* Nothing worked. That's the application's problem. */ + label_post_reentrancy: + post_reentrancy(tsd); + } +label_done: + if (metadata_thp_madvise()) { + /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */ + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (size & HUGEPAGE_MASK) == 0); + pages_nohuge(addr, size); + } +} + +static void +base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, + size_t size) { + size_t sn; + + sn = *extent_sn_next; + (*extent_sn_next)++; + + extent_binit(extent, addr, size, sn); +} + +static size_t +base_get_num_blocks(base_t *base, bool with_new_block) { + base_block_t *b = base->blocks; + assert(b != NULL); + + size_t n_blocks = with_new_block ? 2 : 1; + while (b->next != NULL) { + n_blocks++; + b = b->next; + } + + return n_blocks; +} + +static void +base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { + assert(opt_metadata_thp == metadata_thp_auto); + malloc_mutex_assert_owner(tsdn, &base->mtx); + if (base->auto_thp_switched) { + return; + } + /* Called when adding a new block. */ + bool should_switch; + if (base_ind_get(base) != 0) { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD); + } else { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD_A0); + } + if (!should_switch) { + return; + } + + base->auto_thp_switched = true; + assert(!config_stats || base->n_thp == 0); + /* Make the initial blocks THP lazily. 
*/ + base_block_t *block = base->blocks; + while (block != NULL) { + assert((block->size & HUGEPAGE_MASK) == 0); + pages_huge(block, block->size); + if (config_stats) { + base->n_thp += HUGEPAGE_CEILING(block->size - + extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; + } + block = block->next; + assert(block == NULL || (base_ind_get(base) == 0)); + } +} + +static void * +base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, + size_t alignment) { + void *ret; + + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); + assert(size == ALIGNMENT_CEILING(size, alignment)); + + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), + alignment) - (uintptr_t)extent_addr_get(extent); + ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); + assert(extent_bsize_get(extent) >= *gap_size + size); + extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, + extent_sn_get(extent)); + return ret; +} + +static void +base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, + void *addr, size_t size) { + if (extent_bsize_get(extent) > 0) { + /* + * Compute the index for the largest size class that does not + * exceed extent's size. + */ + szind_t index_floor = + sz_size2index(extent_bsize_get(extent) + 1) - 1; + extent_heap_insert(&base->avail[index_floor], extent); + } + + if (config_stats) { + base->allocated += size; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. Adjust n_thp similarly when + * metadata_thp is enabled. + */ + base->resident += PAGE_CEILING((uintptr_t)addr + size) - + PAGE_CEILING((uintptr_t)addr - gap_size); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + if (metadata_thp_madvise() && (opt_metadata_thp == + metadata_thp_always || base->auto_thp_switched)) { + base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) + - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> + LG_HUGEPAGE; + assert(base->mapped >= base->n_thp << LG_HUGEPAGE); + } + } +} + +static void * +base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, + size_t alignment) { + void *ret; + size_t gap_size; + + ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); + base_extent_bump_alloc_post(base, extent, gap_size, ret, size); + return ret; +} + +/* + * Allocate a block of virtual memory that is large enough to start with a + * base_block_t header, followed by an object of specified size and alignment. + * On success a pointer to the initialized base_block_t header is returned. + */ +static base_block_t * +base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, + unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, + size_t alignment) { + alignment = ALIGNMENT_CEILING(alignment, QUANTUM); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t header_size = sizeof(base_block_t); + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - + header_size; + /* + * Create increasingly larger blocks in order to limit the total number + * of disjoint virtual memory ranges. Choose the next size in the page + * size class series (skipping size classes that are not a multiple of + * HUGEPAGE), or a size large enough to satisfy the requested size and + * alignment, whichever is larger. 
+ */ + size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + + usize)); + pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ? + *pind_last + 1 : *pind_last; + size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) ? min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + block_size); + if (block == NULL) { + return NULL; + } + + if (metadata_thp_madvise()) { + void *addr = (void *)block; + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (block_size & HUGEPAGE_MASK) == 0); + if (opt_metadata_thp == metadata_thp_always) { + pages_huge(addr, block_size); + } else if (opt_metadata_thp == metadata_thp_auto && + base != NULL) { + /* base != NULL indicates this is not a new base. */ + malloc_mutex_lock(tsdn, &base->mtx); + base_auto_thp_switch(tsdn, base); + if (base->auto_thp_switched) { + pages_huge(addr, block_size); + } + malloc_mutex_unlock(tsdn, &base->mtx); + } + } + + *pind_last = sz_psz2ind(block_size); + block->size = block_size; + block->next = NULL; + assert(block_size >= header_size); + base_extent_init(extent_sn_next, &block->extent, + (void *)((uintptr_t)block + header_size), block_size - header_size); + return block; +} + +/* + * Allocate an extent that is at least as large as specified size, with + * specified alignment. + */ +static extent_t * +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &base->mtx); + + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + /* + * Drop mutex during base_block_alloc(), because an extent hook will be + * called. + */ + malloc_mutex_unlock(tsdn, &base->mtx); + base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); + malloc_mutex_lock(tsdn, &base->mtx); + if (block == NULL) { + return NULL; + } + block->next = base->blocks; + base->blocks = block; + if (config_stats) { + base->allocated += sizeof(base_block_t); + base->resident += PAGE_CEILING(sizeof(base_block_t)); + base->mapped += block->size; + if (metadata_thp_madvise() && + !(opt_metadata_thp == metadata_thp_auto + && !base->auto_thp_switched)) { + assert(base->n_thp > 0); + base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> + LG_HUGEPAGE; + } + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); + } + return &block->extent; +} + +base_t * +b0get(void) { + return b0; +} + +base_t * +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + pszind_t pind_last = 0; + size_t extent_sn_next = 0; + base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); + if (block == NULL) { + return NULL; + } + + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + &gap_size, base_size, base_alignment); + base->ind = ind; + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, + malloc_mutex_rank_exclusive)) { + base_unmap(tsdn, extent_hooks, ind, block, block->size); + return NULL; + } + base->pind_last = pind_last; + base->extent_sn_next = extent_sn_next; + base->blocks = block; + 
base->auto_thp_switched = false; + for (szind_t i = 0; i < SC_NSIZES; i++) { + extent_heap_new(&base->avail[i]); + } + if (config_stats) { + base->allocated = sizeof(base_block_t); + base->resident = PAGE_CEILING(sizeof(base_block_t)); + base->mapped = block->size; + base->n_thp = (opt_metadata_thp == metadata_thp_always) && + metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t)) + >> LG_HUGEPAGE : 0; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); + } + base_extent_bump_alloc_post(base, &block->extent, gap_size, base, + base_size); + + return base; +} + +void +base_delete(tsdn_t *tsdn, base_t *base) { + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + base_block_t *next = base->blocks; + do { + base_block_t *block = next; + next = block->next; + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + block->size); + } while (next != NULL); +} + +extent_hooks_t * +base_extent_hooks_get(base_t *base) { + return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, + ATOMIC_ACQUIRE); +} + +extent_hooks_t * +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { + extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + return old_extent_hooks; +} + +static void * +base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, + size_t *esn) { + alignment = QUANTUM_CEILING(alignment); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t asize = usize + alignment - QUANTUM; + + extent_t *extent = NULL; + malloc_mutex_lock(tsdn, &base->mtx); + for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) { + extent = extent_heap_remove_first(&base->avail[i]); + if (extent != NULL) { + /* Use existing space. */ + break; + } + } + if (extent == NULL) { + /* Try to allocate more space. */ + extent = base_extent_alloc(tsdn, base, usize, alignment); + } + void *ret; + if (extent == NULL) { + ret = NULL; + goto label_return; + } + + ret = base_extent_bump_alloc(base, extent, usize, alignment); + if (esn != NULL) { + *esn = extent_sn_get(extent); + } +label_return: + malloc_mutex_unlock(tsdn, &base->mtx); + return ret; +} + +/* + * base_alloc() returns zeroed memory, which is always demand-zeroed for the + * auto arenas, in order to make multi-page sparse data structures such as radix + * tree nodes efficient with respect to physical memory usage. Upon success a + * pointer to at least size bytes with specified alignment is returned. Note + * that size is rounded up to the nearest multiple of alignment to avoid false + * sharing. 
+ */ +void * +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + return base_alloc_impl(tsdn, base, size, alignment, NULL); +} + +extent_t * +base_alloc_extent(tsdn_t *tsdn, base_t *base) { + size_t esn; + extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), + CACHELINE, &esn); + if (extent == NULL) { + return NULL; + } + extent_esn_set(extent, esn); + return extent; +} + +void +base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, + size_t *mapped, size_t *n_thp) { + cassert(config_stats); + + malloc_mutex_lock(tsdn, &base->mtx); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + *allocated = base->allocated; + *resident = base->resident; + *mapped = base->mapped; + *n_thp = base->n_thp; + malloc_mutex_unlock(tsdn, &base->mtx); +} + +void +base_prefork(tsdn_t *tsdn, base_t *base) { + malloc_mutex_prefork(tsdn, &base->mtx); +} + +void +base_postfork_parent(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_parent(tsdn, &base->mtx); +} + +void +base_postfork_child(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_child(tsdn, &base->mtx); +} + +bool +base_boot(tsdn_t *tsdn) { + b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + return (b0 == NULL); +} diff --git a/deps/jemalloc/src/bin.c b/deps/jemalloc/src/bin.c new file mode 100644 index 0000000..bca6b12 --- /dev/null +++ b/deps/jemalloc/src/bin.c @@ -0,0 +1,95 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/witness.h" + +bin_info_t bin_infos[SC_NBINS]; + +static void +bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], + bin_info_t bin_infos[SC_NBINS]) { + for (unsigned i = 0; i < SC_NBINS; i++) { + bin_info_t *bin_info = &bin_infos[i]; + sc_t *sc = &sc_data->sc[i]; + bin_info->reg_size = ((size_t)1U << sc->lg_base) + + ((size_t)sc->ndelta << sc->lg_delta); + bin_info->slab_size = (sc->pgs << LG_PAGE); + bin_info->nregs = + (uint32_t)(bin_info->slab_size / bin_info->reg_size); + bin_info->n_shards = bin_shard_sizes[i]; + bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER( + bin_info->nregs); + bin_info->bitmap_info = bitmap_info; + } +} + +bool +bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size, + size_t end_size, size_t nshards) { + if (nshards > BIN_SHARDS_MAX || nshards == 0) { + return true; + } + + if (start_size > SC_SMALL_MAXCLASS) { + return false; + } + if (end_size > SC_SMALL_MAXCLASS) { + end_size = SC_SMALL_MAXCLASS; + } + + /* Compute the index since this may happen before sz init. */ + szind_t ind1 = sz_size2index_compute(start_size); + szind_t ind2 = sz_size2index_compute(end_size); + for (unsigned i = ind1; i <= ind2; i++) { + bin_shard_sizes[i] = (unsigned)nshards; + } + + return false; +} + +void +bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) { + /* Load the default number of shards. 
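+	 * N_BIN_SHARDS_DEFAULT is 1, i.e. every bin starts out unsharded;
+	 * bin_update_shard_size() (driven by the "bin_shards" malloc_conf
+	 * option) can override the count for selected ranges of small size
+	 * classes.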
*/ + for (unsigned i = 0; i < SC_NBINS; i++) { + bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT; + } +} + +void +bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { + assert(sc_data->initialized); + bin_infos_init(sc_data, bin_shard_sizes, bin_infos); +} + +bool +bin_init(bin_t *bin) { + if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, + malloc_mutex_rank_exclusive)) { + return true; + } + bin->slabcur = NULL; + extent_heap_new(&bin->slabs_nonfull); + extent_list_init(&bin->slabs_full); + if (config_stats) { + memset(&bin->stats, 0, sizeof(bin_stats_t)); + } + return false; +} + +void +bin_prefork(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_prefork(tsdn, &bin->lock); +} + +void +bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_parent(tsdn, &bin->lock); +} + +void +bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_child(tsdn, &bin->lock); +} diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c new file mode 100644 index 0000000..468b317 --- /dev/null +++ b/deps/jemalloc/src/bitmap.c @@ -0,0 +1,121 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +/******************************************************************************/ + +#ifdef BITMAP_USE_TREE + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. + */ + binfo->levels[0].group_offset = 0; + group_count = BITMAP_BITS2GROUPS(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = BITMAP_BITS2GROUPS(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); + binfo->nlevels = i; + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->levels[binfo->nlevels].group_offset; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface. + */ + + if (fill) { + /* The "filled" bitmap starts out with all 0 bits. */ + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + /* + * The "empty" bitmap starts out with all 1 bits, except for trailing + * unused bits (if any). Note that each group uses bit 0 to correspond + * to the first logical bit in the group, so extra bits are the most + * significant bits of the last group. 
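+	 * For example, with 64-bit groups, nbits == 72 leaves only 8 valid bits
+	 * in the last level-0 group, so its 56 high-order bits are cleared by
+	 * the shift below.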
+ */ + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } + } +} + +#else /* BITMAP_USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->ngroups; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + + if (fill) { + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->ngroups - 1] >>= extra; + } +} + +#endif /* BITMAP_USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) { + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c new file mode 100644 index 0000000..1bf6df5 --- /dev/null +++ b/deps/jemalloc/src/ckh.c @@ -0,0 +1,570 @@ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. + * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. 
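+ * (With two pointer-sized fields per cell, a (4,2) bucket on a typical
+ * 64-bit system is exactly one 64-byte cache line, and the table above
+ * predicts an expected maximum load factor slightly above 0.93 for that
+ * configuration.)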
+ * + ******************************************************************************/ +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/ckh.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ +static size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { + return (bucket << LG_CKH_BUCKET_CELLS) + i; + } + } + + return SIZE_T_MAX; +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ +static size_t +ckh_isearch(ckh_t *ckh, const void *key) { + size_t hashes[2], bucket, cell; + + assert(ckh != NULL); + + ckh->hash(key, hashes); + + /* Search primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + if (cell != SIZE_T_MAX) { + return cell; + } + + /* Search secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + return cell; +} + +static bool +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, + const void *data) { + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; + return false; + } + } + + return true; +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. + */ +static bool +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, + void const **argdata) { + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hashes[2], bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. 
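+		 * The loop below then resumes from the evicted item's
+		 * alternate bucket; it terminates either by finding a bucket
+		 * with a free cell or by detecting a cycle back to argbucket.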
+ */ + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). */ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, hashes); + tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) + - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. */ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; + return true; + } + + bucket = tbucket; + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } + } +} + +static bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { + size_t hashes[2], bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, hashes); + + /* Try to insert in primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } + + /* Try to insert in secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } + + /* + * Try to find a place for this item via iterative eviction/relocation. + */ + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. + */ +static bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; + return true; + } + nins++; + } + } + + return false; +} + +static bool +ckh_grow(tsd_t *tsd, ckh_t *ckh) { + bool ret; + ckhc_t *tab, *ttab; + unsigned lg_prevbuckets, lg_curcells; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 + || usize > SC_LARGE_MAXCLASS)) { + ret = true; + goto label_return; + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); + if (tab == NULL) { + ret = true; + goto label_return; + } + /* Swap in new table. 
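+		 * The new (empty) table is installed first so that
+		 * ckh_rebuild() can reinsert the old items through the normal
+		 * insertion path; on failure the old table is restored and a
+		 * yet larger size is tried on the next pass.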
*/ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +label_return: + return ret; +} + +static void +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { + ckhc_t *tab, *ttab; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { + return; + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp) { + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prng_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* + * Find the minimum power of 2 that is large enough to fit minitems + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * factor that will typically allow mincells items to fit without ever + * growing the table. + */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; + lg_mincells++) { + /* Do nothing. 
*/ + } + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { + ret = true; + goto label_return; + } + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); + if (ckh->tab == NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return ret; +} + +void +ckh_delete(tsd_t *tsd, ckh_t *ckh) { + assert(ckh != NULL); + +#ifdef CKH_VERBOSE + malloc_printf( + "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," + " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," + " nrelocs: %"FMTu64"\n", __func__, ckh, + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + if (config_debug) { + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } +} + +size_t +ckh_count(ckh_t *ckh) { + assert(ckh != NULL); + + return ckh->count; +} + +bool +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { + if (key != NULL) { + *key = (void *)ckh->tab[i].key; + } + if (data != NULL) { + *data = (void *)ckh->tab[i].data; + } + *tabind = i + 1; + return false; + } + } + + return true; +} + +bool +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { + bool ret; + + assert(ckh != NULL); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { + if (ckh_grow(tsd, ckh)) { + ret = true; + goto label_return; + } + } + + ret = false; +label_return: + return ret; +} + +bool +ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data) { + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) { + *key = (void *)ckh->tab[cell].key; + } + if (data != NULL) { + *data = (void *)ckh->tab[cell].data; + } + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. 
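+			 * ckh_shrink() simply returns if the smaller table
+			 * cannot be allocated or rebuilt, leaving the table at
+			 * its current size; correctness does not depend on the
+			 * shrink succeeding.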
*/ + ckh_shrink(tsd, ckh); + } + + return false; + } + + return true; +} + +bool +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) { + *key = (void *)ckh->tab[cell].key; + } + if (data != NULL) { + *data = (void *)ckh->tab[cell].data; + } + return false; + } + + return true; +} + +void +ckh_string_hash(const void *key, size_t r_hash[2]) { + hash(key, strlen((const char *)key), 0x94122f33U, r_hash); +} + +bool +ckh_string_keycomp(const void *k1, const void *k2) { + assert(k1 != NULL); + assert(k2 != NULL); + + return !strcmp((char *)k1, (char *)k2); +} + +void +ckh_pointer_hash(const void *key, size_t r_hash[2]) { + union { + const void *v; + size_t i; + } u; + + assert(sizeof(u.v) == sizeof(u.i)); + u.v = key; + hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); +} + +bool +ckh_pointer_keycomp(const void *k1, const void *k2) { + return (k1 == k2); +} diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c new file mode 100644 index 0000000..48afaa6 --- /dev/null +++ b/deps/jemalloc/src/ctl.c @@ -0,0 +1,3435 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats->* + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; + +/******************************************************************************/ +/* Helpers for named and indexed nodes. */ + +static const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) { + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + +static const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) { + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + +static const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) { + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); +} + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); + +CTL_PROTO(version) +CTL_PROTO(epoch) +CTL_PROTO(background_thread) +CTL_PROTO(max_background_threads) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) +CTL_PROTO(config_debug) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_malloc_conf) +CTL_PROTO(config_opt_safety_checks) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_utrace) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_confirm_conf) +CTL_PROTO(opt_metadata_thp) +CTL_PROTO(opt_retain) +CTL_PROTO(opt_dss) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_percpu_arena) +CTL_PROTO(opt_oversize_threshold) +CTL_PROTO(opt_background_thread) +CTL_PROTO(opt_max_background_threads) +CTL_PROTO(opt_dirty_decay_ms) +CTL_PROTO(opt_muzzy_decay_ms) +CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print_opts) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_thp) +CTL_PROTO(opt_lg_extent_max_active_fit) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_final) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_initialized) +CTL_PROTO(arena_i_decay) +CTL_PROTO(arena_i_purge) +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_destroy) +CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_dirty_decay_ms) +CTL_PROTO(arena_i_muzzy_decay_ms) +CTL_PROTO(arena_i_extent_hooks) +CTL_PROTO(arena_i_retain_grow_limit) +INDEX_PROTO(arena_i) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_slab_size) +CTL_PROTO(arenas_bin_i_nshards) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_dirty_decay_ms) +CTL_PROTO(arenas_muzzy_decay_ms) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) +CTL_PROTO(arenas_nlextents) +CTL_PROTO(arenas_create) +CTL_PROTO(arenas_lookup) +CTL_PROTO(prof_thread_active_init) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) +CTL_PROTO(prof_interval) +CTL_PROTO(lg_prof_sample) +CTL_PROTO(prof_log_start) +CTL_PROTO(prof_log_stop) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_small_nfills) +CTL_PROTO(stats_arenas_i_small_nflushes) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) 
+CTL_PROTO(stats_arenas_i_large_nfills) +CTL_PROTO(stats_arenas_i_large_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nslabs) +CTL_PROTO(stats_arenas_i_bins_j_nreslabs) +CTL_PROTO(stats_arenas_i_bins_j_curslabs) +CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) +CTL_PROTO(stats_arenas_i_extents_j_ndirty) +CTL_PROTO(stats_arenas_i_extents_j_nmuzzy) +CTL_PROTO(stats_arenas_i_extents_j_nretained) +CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes) +CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes) +CTL_PROTO(stats_arenas_i_extents_j_retained_bytes) +INDEX_PROTO(stats_arenas_i_extents_j) +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_uptime) +CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_dirty_decay_ms) +CTL_PROTO(stats_arenas_i_muzzy_decay_ms) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pmuzzy) +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_extent_avail) +CTL_PROTO(stats_arenas_i_dirty_npurge) +CTL_PROTO(stats_arenas_i_dirty_nmadvise) +CTL_PROTO(stats_arenas_i_dirty_purged) +CTL_PROTO(stats_arenas_i_muzzy_npurge) +CTL_PROTO(stats_arenas_i_muzzy_nmadvise) +CTL_PROTO(stats_arenas_i_muzzy_purged) +CTL_PROTO(stats_arenas_i_base) +CTL_PROTO(stats_arenas_i_internal) +CTL_PROTO(stats_arenas_i_metadata_thp) +CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_resident) +CTL_PROTO(stats_arenas_i_abandoned_vm) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_background_thread_num_threads) +CTL_PROTO(stats_background_thread_num_runs) +CTL_PROTO(stats_background_thread_run_interval) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_metadata_thp) +CTL_PROTO(stats_resident) +CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) +CTL_PROTO(experimental_hooks_install) +CTL_PROTO(experimental_hooks_remove) +CTL_PROTO(experimental_utilization_query) +CTL_PROTO(experimental_utilization_batch_query) +CTL_PROTO(experimental_arenas_i_pactivep) +INDEX_PROTO(experimental_arenas_i) + +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ +CTL_PROTO(stats_##n##_num_ops) \ +CTL_PROTO(stats_##n##_num_wait) \ +CTL_PROTO(stats_##n##_num_spin_acq) \ +CTL_PROTO(stats_##n##_num_owner_switch) \ +CTL_PROTO(stats_##n##_total_wait_time) \ +CTL_PROTO(stats_##n##_max_wait_time) \ +CTL_PROTO(stats_##n##_max_num_thds) + +/* Global mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* Arena bin mutexes. */ +MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) +#undef MUTEX_STATS_CTL_PROTO_GEN + +CTL_PROTO(stats_mutexes_reset) + +/******************************************************************************/ +/* mallctl tree. 
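+ * Each node is either named (a fixed set of children matched by string) or
+ * indexed (the child is selected by a numeric MIB component at lookup time).
+ * For example, the name "stats.arenas.0.pdirty" walks root -> stats (named)
+ * -> arenas (indexed, element 0) -> pdirty.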
*/ + +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. + */ +#define INDEX(i) {false}, i##_index + +static const ctl_named_node_t thread_tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} +}; + +static const ctl_named_node_t config_node[] = { + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("opt_safety_checks"), CTL(config_opt_safety_checks)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_named_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("confirm_conf"), CTL(opt_confirm_conf)}, + {NAME("metadata_thp"), CTL(opt_metadata_thp)}, + {NAME("retain"), CTL(opt_retain)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("oversize_threshold"), CTL(opt_oversize_threshold)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("max_background_threads"), CTL(opt_max_background_threads)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("thp"), CTL(opt_thp)}, + {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} +}; + +static const ctl_named_node_t arena_i_node[] = { + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("purge"), 
CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, + {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} +}; +static const ctl_named_node_t super_arena_i_node[] = { + {NAME(""), CHILD(named, arena_i)} +}; + +static const ctl_indexed_node_t arena_node[] = { + {INDEX(arena_i)} +}; + +static const ctl_named_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}, + {NAME("nshards"), CTL(arenas_bin_i_nshards)} +}; +static const ctl_named_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(named, arenas_bin_i)} +}; + +static const ctl_indexed_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} +}; +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} +}; + +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} +}; + +static const ctl_named_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)}, + {NAME("lookup"), CTL(arenas_lookup)} +}; + +static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)}, + {NAME("log_start"), CTL(prof_log_start)}, + {NAME("log_stop"), CTL(prof_log_stop)} +}; +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_small_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)} +}; + +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_large_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)} +}; + +#define MUTEX_PROF_DATA_NODE(prefix) \ +static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), \ + CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), \ + CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), \ + CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + 
{NAME("total_wait_time"), \ + CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), \ + CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} \ + /* Note that # of current waiting thread not provided. */ \ +}; + +MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) + +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)}, + {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} +}; + +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} +}; +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} +}; + +static const ctl_named_node_t stats_arenas_i_extents_j_node[] = { + {NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)}, + {NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)}, + {NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)}, + {NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)}, + {NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)}, + {NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)} +}; + +static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_extents_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_extents_node[] = { + {INDEX(stats_arenas_i_extents_j)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, +MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +static const ctl_named_node_t stats_arenas_i_node[] = { + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), 
CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("extents"), CHILD(indexed, stats_arenas_i_extents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} +}; +static const ctl_named_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(named, stats_arenas_i)} +}; + +static const ctl_indexed_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_named_node_t stats_background_thread_node[] = { + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +static const ctl_named_node_t stats_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + {NAME("reset"), CTL(stats_mutexes_reset)} +}; +#undef MUTEX_PROF_DATA_NODE + +static const ctl_named_node_t stats_node[] = { + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("metadata_thp"), CTL(stats_metadata_thp)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("background_thread"), + CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} +}; + +static const ctl_named_node_t experimental_hooks_node[] = { + {NAME("install"), CTL(experimental_hooks_install)}, + {NAME("remove"), CTL(experimental_hooks_remove)} +}; + +static const ctl_named_node_t experimental_utilization_node[] = { + {NAME("query"), CTL(experimental_utilization_query)}, + {NAME("batch_query"), CTL(experimental_utilization_batch_query)} +}; + +static const ctl_named_node_t experimental_arenas_i_node[] = { + {NAME("pactivep"), CTL(experimental_arenas_i_pactivep)} +}; +static const ctl_named_node_t super_experimental_arenas_i_node[] = { + {NAME(""), CHILD(named, experimental_arenas_i)} +}; + +static const ctl_indexed_node_t experimental_arenas_node[] = { + {INDEX(experimental_arenas_i)} +}; + +static const ctl_named_node_t experimental_node[] = { + {NAME("hooks"), CHILD(named, experimental_hooks)}, + {NAME("utilization"), CHILD(named, experimental_utilization)}, + {NAME("arenas"), CHILD(indexed, experimental_arenas)} +}; + +static const ctl_named_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, + {NAME("max_background_threads"), CTL(max_background_threads)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, 
config)}, + {NAME("opt"), CHILD(named, opt)}, + {NAME("tcache"), CHILD(named, tcache)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)}, + {NAME("experimental"), CHILD(named, experimental)} +}; +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +/* + * Sets *dst + *src non-atomically. This is safe, since everything is + * synchronized by the ctl mutex. + */ +static void +ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); + atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); +#else + *dst += *src; +#endif +} + +/* Likewise: with ctl mutex synchronization, reading is simple. */ +static uint64_t +ctl_arena_stats_read_u64(arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + return *p; +#endif +} + +static void +accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); + atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); +} + +/******************************************************************************/ + +static unsigned +arenas_i2a_impl(size_t i, bool compat, bool validate) { + unsigned a; + + switch (i) { + case MALLCTL_ARENAS_ALL: + a = 0; + break; + case MALLCTL_ARENAS_DESTROYED: + a = 1; + break; + default: + if (compat && i == ctl_arenas->narenas) { + /* + * Provide deprecated backward compatibility for + * accessing the merged stats at index narenas rather + * than via MALLCTL_ARENAS_ALL. This is scheduled for + * removal in 6.0.0. + */ + a = 0; + } else if (validate && i >= ctl_arenas->narenas) { + a = UINT_MAX; + } else { + /* + * This function should never be called for an index + * more than one past the range of indices that have + * initialized ctl data. 
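+				 * Slots 0 and 1 are reserved for
+				 * MALLCTL_ARENAS_ALL and
+				 * MALLCTL_ARENAS_DESTROYED, so a real arena
+				 * index i maps to ctl slot i + 2.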
+ */ + assert(i < ctl_arenas->narenas || (!validate && i == + ctl_arenas->narenas)); + a = (unsigned)i + 2; + } + break; + } + + return a; +} + +static unsigned +arenas_i2a(size_t i) { + return arenas_i2a_impl(i, true, false); +} + +static ctl_arena_t * +arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { + ctl_arena_t *ret; + + assert(!compat || !init); + + ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; + if (init && ret == NULL) { + if (config_stats) { + struct container_s { + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; + }; + struct container_s *cont = + (struct container_s *)base_alloc(tsd_tsdn(tsd), + b0get(), sizeof(struct container_s), QUANTUM); + if (cont == NULL) { + return NULL; + } + ret = &cont->ctl_arena; + ret->astats = &cont->astats; + } else { + ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(ctl_arena_t), QUANTUM); + if (ret == NULL) { + return NULL; + } + } + ret->arena_ind = (unsigned)i; + ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; + } + + assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); + return ret; +} + +static ctl_arena_t * +arenas_i(size_t i) { + ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); + assert(ret != NULL); + return ret; +} + +static void +ctl_arena_clear(ctl_arena_t *ctl_arena) { + ctl_arena->nthreads = 0; + ctl_arena->dss = dss_prec_names[dss_prec_limit]; + ctl_arena->dirty_decay_ms = -1; + ctl_arena->muzzy_decay_ms = -1; + ctl_arena->pactive = 0; + ctl_arena->pdirty = 0; + ctl_arena->pmuzzy = 0; + if (config_stats) { + memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); + ctl_arena->astats->allocated_small = 0; + ctl_arena->astats->nmalloc_small = 0; + ctl_arena->astats->ndalloc_small = 0; + ctl_arena->astats->nrequests_small = 0; + ctl_arena->astats->nfills_small = 0; + ctl_arena->astats->nflushes_small = 0; + memset(ctl_arena->astats->bstats, 0, SC_NBINS * + sizeof(bin_stats_t)); + memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) * + sizeof(arena_stats_large_t)); + memset(ctl_arena->astats->estats, 0, SC_NPSIZES * + sizeof(arena_stats_extents_t)); + } +} + +static void +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { + unsigned i; + + if (config_stats) { + arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy, + &ctl_arena->astats->astats, ctl_arena->astats->bstats, + ctl_arena->astats->lstats, ctl_arena->astats->estats); + + for (i = 0; i < SC_NBINS; i++) { + ctl_arena->astats->allocated_small += + ctl_arena->astats->bstats[i].curregs * + sz_index2size(i); + ctl_arena->astats->nmalloc_small += + ctl_arena->astats->bstats[i].nmalloc; + ctl_arena->astats->ndalloc_small += + ctl_arena->astats->bstats[i].ndalloc; + ctl_arena->astats->nrequests_small += + ctl_arena->astats->bstats[i].nrequests; + ctl_arena->astats->nfills_small += + ctl_arena->astats->bstats[i].nfills; + ctl_arena->astats->nflushes_small += + ctl_arena->astats->bstats[i].nflushes; + } + } else { + arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy); + } +} + +static void +ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, + bool destroyed) { + unsigned i; + + if (!destroyed) { + ctl_sdarena->nthreads += ctl_arena->nthreads; + 
ctl_sdarena->pactive += ctl_arena->pactive; + ctl_sdarena->pdirty += ctl_arena->pdirty; + ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; + } else { + assert(ctl_arena->nthreads == 0); + assert(ctl_arena->pactive == 0); + assert(ctl_arena->pdirty == 0); + assert(ctl_arena->pmuzzy == 0); + } + + if (config_stats) { + ctl_arena_stats_t *sdstats = ctl_sdarena->astats; + ctl_arena_stats_t *astats = ctl_arena->astats; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.mapped, + &astats->astats.mapped); + accum_atomic_zu(&sdstats->astats.retained, + &astats->astats.retained); + accum_atomic_zu(&sdstats->astats.extent_avail, + &astats->astats.extent_avail); + } + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, + &astats->astats.decay_dirty.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, + &astats->astats.decay_dirty.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, + &astats->astats.decay_dirty.purged); + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, + &astats->astats.decay_muzzy.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, + &astats->astats.decay_muzzy.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, + &astats->astats.decay_muzzy.purged); + +#define OP(mtx) malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx])); +MUTEX_PROF_ARENA_MUTEXES +#undef OP + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.base, + &astats->astats.base); + accum_atomic_zu(&sdstats->astats.internal, + &astats->astats.internal); + accum_atomic_zu(&sdstats->astats.resident, + &astats->astats.resident); + accum_atomic_zu(&sdstats->astats.metadata_thp, + &astats->astats.metadata_thp); + } else { + assert(atomic_load_zu( + &astats->astats.internal, ATOMIC_RELAXED) == 0); + } + + if (!destroyed) { + sdstats->allocated_small += astats->allocated_small; + } else { + assert(astats->allocated_small == 0); + } + sdstats->nmalloc_small += astats->nmalloc_small; + sdstats->ndalloc_small += astats->ndalloc_small; + sdstats->nrequests_small += astats->nrequests_small; + sdstats->nfills_small += astats->nfills_small; + sdstats->nflushes_small += astats->nflushes_small; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.allocated_large, + &astats->astats.allocated_large); + } else { + assert(atomic_load_zu(&astats->astats.allocated_large, + ATOMIC_RELAXED) == 0); + } + ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, + &astats->astats.nmalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, + &astats->astats.ndalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, + &astats->astats.nrequests_large); + accum_atomic_zu(&sdstats->astats.abandoned_vm, + &astats->astats.abandoned_vm); + + accum_atomic_zu(&sdstats->astats.tcache_bytes, + &astats->astats.tcache_bytes); + + if (ctl_arena->arena_ind == 0) { + sdstats->astats.uptime = astats->astats.uptime; + } + + /* Merge bin stats. 
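+		 * Monotonic counters (nmalloc, ndalloc, nrequests, nfills,
+		 * nflushes, nslabs, reslabs) are always summed; current-state
+		 * gauges (curregs, curslabs, nonfull_slabs) are only
+		 * meaningful for live arenas, so for a destroyed arena they
+		 * are asserted to be zero instead of merged.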
*/ + for (i = 0; i < SC_NBINS; i++) { + sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sdstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + if (!destroyed) { + sdstats->bstats[i].curregs += + astats->bstats[i].curregs; + } else { + assert(astats->bstats[i].curregs == 0); + } + sdstats->bstats[i].nfills += astats->bstats[i].nfills; + sdstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; + sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + if (!destroyed) { + sdstats->bstats[i].curslabs += + astats->bstats[i].curslabs; + sdstats->bstats[i].nonfull_slabs += + astats->bstats[i].nonfull_slabs; + } else { + assert(astats->bstats[i].curslabs == 0); + assert(astats->bstats[i].nonfull_slabs == 0); + } + malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, + &astats->bstats[i].mutex_data); + } + + /* Merge stats for large allocations. */ + for (i = 0; i < SC_NSIZES - SC_NBINS; i++) { + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + &astats->lstats[i].nmalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + &astats->lstats[i].ndalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + &astats->lstats[i].nrequests); + if (!destroyed) { + sdstats->lstats[i].curlextents += + astats->lstats[i].curlextents; + } else { + assert(astats->lstats[i].curlextents == 0); + } + } + + /* Merge extents stats. */ + for (i = 0; i < SC_NPSIZES; i++) { + accum_atomic_zu(&sdstats->estats[i].ndirty, + &astats->estats[i].ndirty); + accum_atomic_zu(&sdstats->estats[i].nmuzzy, + &astats->estats[i].nmuzzy); + accum_atomic_zu(&sdstats->estats[i].nretained, + &astats->estats[i].nretained); + accum_atomic_zu(&sdstats->estats[i].dirty_bytes, + &astats->estats[i].dirty_bytes); + accum_atomic_zu(&sdstats->estats[i].muzzy_bytes, + &astats->estats[i].muzzy_bytes); + accum_atomic_zu(&sdstats->estats[i].retained_bytes, + &astats->estats[i].retained_bytes); + } + } +} + +static void +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, + unsigned i, bool destroyed) { + ctl_arena_t *ctl_arena = arenas_i(i); + + ctl_arena_clear(ctl_arena); + ctl_arena_stats_amerge(tsdn, ctl_arena, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); +} + +static unsigned +ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { + unsigned arena_ind; + ctl_arena_t *ctl_arena; + + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != + NULL) { + ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_ind = ctl_arena->arena_ind; + } else { + arena_ind = ctl_arenas->narenas; + } + + /* Trigger stats allocation. */ + if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { + return UINT_MAX; + } + + /* Initialize new arena. 
*/ + if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + return UINT_MAX; + } + + if (arena_ind == ctl_arenas->narenas) { + ctl_arenas->narenas++; + } + + return arena_ind; +} + +static void +ctl_background_thread_stats_read(tsdn_t *tsdn) { + background_thread_stats_t *stats = &ctl_stats->background_thread; + if (!have_background_thread || + background_thread_stats_read(tsdn, stats)) { + memset(stats, 0, sizeof(background_thread_stats_t)); + nstime_init(&stats->run_interval, 0); + } +} + +static void +ctl_refresh(tsdn_t *tsdn) { + unsigned i; + ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); + VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). + */ + ctl_arena_clear(ctl_sarena); + + for (i = 0; i < ctl_arenas->narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + for (i = 0; i < ctl_arenas->narenas; i++) { + ctl_arena_t *ctl_arena = arenas_i(i); + bool initialized = (tarenas[i] != NULL); + + ctl_arena->initialized = initialized; + if (initialized) { + ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, + false); + } + } + + if (config_stats) { + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, + ATOMIC_RELAXED); + ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); + ctl_stats->metadata = atomic_load_zu( + &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + atomic_load_zu(&ctl_sarena->astats->astats.internal, + ATOMIC_RELAXED); + ctl_stats->metadata_thp = atomic_load_zu( + &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); + ctl_stats->resident = atomic_load_zu( + &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); + ctl_stats->mapped = atomic_load_zu( + &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); + ctl_stats->retained = atomic_load_zu( + &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + + ctl_background_thread_stats_read(tsdn); + +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + if (config_prof && opt_prof) { + READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, + bt2gctx_mtx); + } + if (have_background_thread) { + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_background_thread, + background_thread_lock); + } else { + memset(&ctl_stats->mutex_prof_data[ + global_prof_mutex_background_thread], 0, + sizeof(mutex_prof_data_t)); + } + /* We own ctl mutex already. */ + malloc_mutex_prof_read(tsdn, + &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], + &ctl_mtx); +#undef READ_GLOBAL_MUTEX_PROF_DATA + } + ctl_arenas->epoch++; +} + +static bool +ctl_init(tsd_t *tsd) { + bool ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (!ctl_initialized) { + ctl_arena_t *ctl_sarena, *ctl_darena; + unsigned i; + + /* + * Allocate demand-zeroed space for pointers to the full + * range of supported arena indices. 
+ */ + if (ctl_arenas == NULL) { + ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, + b0get(), sizeof(ctl_arenas_t), QUANTUM); + if (ctl_arenas == NULL) { + ret = true; + goto label_return; + } + } + + if (config_stats && ctl_stats == NULL) { + ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), + sizeof(ctl_stats_t), QUANTUM); + if (ctl_stats == NULL) { + ret = true; + goto label_return; + } + } + + /* + * Allocate space for the current full range of arenas + * here rather than doing it lazily elsewhere, in order + * to limit when OOM-caused errors can occur. + */ + if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, + true)) == NULL) { + ret = true; + goto label_return; + } + ctl_sarena->initialized = true; + + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, + false, true)) == NULL) { + ret = true; + goto label_return; + } + ctl_arena_clear(ctl_darena); + /* + * Don't toggle ctl_darena to initialized until an arena is + * actually destroyed, so that arena..initialized can be used + * to query whether the stats are relevant. + */ + + ctl_arenas->narenas = narenas_total_get(); + for (i = 0; i < ctl_arenas->narenas; i++) { + if (arenas_i_impl(tsd, i, false, true) == NULL) { + ret = true; + goto label_return; + } + } + + ql_new(&ctl_arenas->destroyed); + ctl_refresh(tsdn); + + ctl_initialized = true; + } + + ret = false; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +static int +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_named_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; + + /* Children are named. */ + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { + node = child; + if (nodesp != NULL) { + nodesp[i] = + (const ctl_node_t *)node; + } + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_indexed_node_t *inode; + + /* Children are indexed. */ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = ctl_indexed_node(node->children); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + + if (nodesp != NULL) { + nodesp[i] = (const ctl_node_t *)node; + } + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? 
tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: + return ret; +} + +int +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) { + goto label_return; + } + + node = ctl_named_node(nodes[depth-1]); + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + } else { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +int +ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { + int ret; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); +label_return: + return(ret); +} + +int +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const ctl_named_node_t *node; + size_t i; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + /* Children are named. */ + if (node->nchildren <= mib[i]) { + ret = ENOENT; + goto label_return; + } + node = ctl_named_children(node, mib[i]); + } else { + const ctl_indexed_node_t *inode; + + /* Indexed element. */ + inode = ctl_indexed_node(node->children); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ + if (node && node->ctl) { + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + } else { + /* Partial MIB. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +bool +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, + malloc_mutex_rank_exclusive)) { + return true; + } + + ctl_initialized = false; + + return false; +} + +void +ctl_prefork(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &ctl_mtx); +} + +void +ctl_postfork_parent(tsdn_t *tsdn) { + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); +} + +void +ctl_postfork_child(tsdn_t *tsdn) { + malloc_mutex_postfork_child(tsdn, &ctl_mtx); +} + +/******************************************************************************/ +/* *_ctl() functions. */ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? 
sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + ret = EINVAL; \ + goto label_return; \ + } \ + *(t *)oldp = (v); \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ +} while (0) + +#define MIB_UNSIGNED(v, i) do { \ + if (mib[i] > UINT_MAX) { \ + ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ +} while (0) + +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded. + */ +#define CTL_RO_CLGEN(c, l, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + if (l) { \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + if (l) { \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + return ret; \ +} + +#define CTL_RO_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ +} + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. 
+ */ +#define CTL_RO_NL_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_CONFIG_GEN(n, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +/******************************************************************************/ + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + UNUSED uint64_t newval; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(newval, uint64_t); + if (newp != NULL) { + ctl_refresh(tsd_tsdn(tsd)); + } + READ(ctl_arenas->epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +background_thread_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = background_thread_enabled(); + READ(oldval, bool); + } else { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = background_thread_enabled(); + READ(oldval, bool); + + bool newval = *(bool *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + + background_thread_enabled_set(tsd_tsdn(tsd), newval); + if (newval) { + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; +} + +static int +max_background_threads_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + size_t oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = max_background_threads; + READ(oldval, size_t); + } else { + if (newlen != sizeof(size_t)) { + ret = EINVAL; + 
goto label_return; + } + oldval = max_background_threads; + READ(oldval, size_t); + + size_t newval = *(size_t *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + if (newval > opt_max_background_threads) { + ret = EINVAL; + goto label_return; + } + + if (background_thread_enabled()) { + background_thread_enabled_set(tsd_tsdn(tsd), false); + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + max_background_threads = newval; + background_thread_enabled_set(tsd_tsdn(tsd), true); + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + max_background_threads = newval; + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; +} + +/******************************************************************************/ + +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool) +CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], + const char *) +CTL_RO_NL_GEN(opt_retain, opt_retain, bool) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], + const char *) +CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t) +CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) +CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) +CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) +CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, + size_t) +CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, 
bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) + +/******************************************************************************/ + +static int +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + arena_t *oldarena; + unsigned newind, oldind; + + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) { + return EAGAIN; + } + newind = oldind = arena_ind_get(oldarena); + WRITE(newind, unsigned); + READ(oldind, unsigned); + + if (newind != oldind) { + arena_t *newarena; + + if (newind >= narenas_total_get()) { + /* New arena index is out of range. */ + ret = EFAULT; + goto label_return; + } + + if (have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { + /* + * If perCPU arena is enabled, thread_arena + * control is not allowed for the auto arena + * range. + */ + ret = EPERM; + goto label_return; + } + } + + /* Initialize arena if necessary. */ + newarena = arena_get(tsd_tsdn(tsd), newind, true); + if (newarena == NULL) { + ret = EAGAIN; + goto label_return; + } + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + if (tcache_available(tsd)) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tsd_tcachep_get(tsd), newarena); + } + } + + ret = 0; +label_return: + return ret; +} + +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) + +static int +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + bool oldval; + + oldval = tcache_enabled_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(tsd, *(bool *)newp); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + + if (!tcache_available(tsd)) { + ret = EFAULT; + goto label_return; + } + + READONLY(); + WRITEONLY(); + + tcache_flush(tsd); + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + + if (!config_prof) { + return ENOENT; + } + + READ_XOR_WRITE(); + + if (newp != NULL) { + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) { + goto label_return; + } + } else { + const char *oldname = prof_thread_name_get(tsd); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + oldval = prof_thread_active_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(tsd, 
*(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + READONLY(); + if (tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + unsigned arena_ind; + bool initialized; + + READONLY(); + MIB_UNSIGNED(arena_ind, 1); + + malloc_mutex_lock(tsdn, &ctl_mtx); + initialized = arenas_i(arena_ind)->initialized; + malloc_mutex_unlock(tsdn, &ctl_mtx); + + READ(initialized, bool); + + ret = 0; +label_return: + return ret; +} + +static void +arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_arenas->narenas; + + /* + * Access via index narenas is deprecated, and scheduled for + * removal in 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); + + for (i = 0; i < narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) { + arena_decay(tsdn, tarenas[i], false, + all); + } + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. 
*/ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) { + arena_decay(tsdn, tarena, false, all); + } + } + } +} + +static int +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, false); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, true); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, + arena_t **arena) { + int ret; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(*arena_ind, 1); + + *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); + if (*arena == NULL || arena_is_auto(*arena)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static void +arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { + /* Temporarily disable the background thread during arena reset. */ + if (have_background_thread) { + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (background_thread_enabled()) { + background_thread_info_t *info = + background_thread_info_get(arena_ind); + assert(info->state == background_thread_started); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_paused; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } +} + +static void +arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { + if (have_background_thread) { + if (background_thread_enabled()) { + background_thread_info_t *info = + background_thread_info_get(arena_ind); + assert(info->state == background_thread_paused); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_started; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + } +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + return ret; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + arena_reset(tsd, arena); + arena_reset_finish_background_thread(tsd, arena_ind); + + return ret; +} + +static int +arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + ctl_arena_t *ctl_darena, *ctl_arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + goto label_return; + } + + if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, + true) != 0) { + ret = EFAULT; + goto label_return; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + /* Merge stats after resetting and purging arena. 
*/ + arena_reset(tsd, arena); + arena_decay(tsd_tsdn(tsd), arena, false, true); + ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); + ctl_darena->initialized = true; + ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); + /* Destroy arena. */ + arena_destroy(tsd, arena); + ctl_arena = arenas_i(arena_ind); + ctl_arena->initialized = false; + /* Record arena index for later recycling via arenas.create. */ + ql_elm_new(ctl_arena, destroyed_link); + ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_reset_finish_background_thread(tsd, arena_ind); + + assert(ret == 0); +label_return: + return ret; +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *dss = NULL; + unsigned arena_ind; + dss_prec_t dss_prec_old = dss_prec_limit; + dss_prec_t dss_prec = dss_prec_limit; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(dss, const char *); + MIB_UNSIGNED(arena_ind, 1); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; + } + } + + /* + * Access via index narenas is deprecated, and scheduled for removal in + * 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == + ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit && + extent_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = extent_dss_prec_get(); + } else { + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(arena); + } + + dss = dss_prec_names[dss_prec_old]; + READ(dss, const char *); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { + int ret; + unsigned arena_ind; + arena_t *arena; + + MIB_UNSIGNED(arena_ind, 1); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : + arena_muzzy_decay_ms_get(arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) { + /* + * By default the huge arena purges eagerly. If it is + * set to non-zero decay time afterwards, background + * thread might be needed. + */ + if (background_thread_create(tsd, arena_ind)) { + ret = EFAULT; + goto label_return; + } + } + if (dirty ? 
arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), + arena, *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +static int +arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get()) { + extent_hooks_t *old_extent_hooks; + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + if (arena_ind >= narenas_auto) { + ret = EFAULT; + goto label_return; + } + old_extent_hooks = + (extent_hooks_t *)&extent_hooks_default; + READ(old_extent_hooks, extent_hooks_t *); + if (newp != NULL) { + /* Initialize a new arena as a side effect. */ + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + arena = arena_init(tsd_tsdn(tsd), arena_ind, + new_extent_hooks); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + } + } else { + if (newp != NULL) { + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + old_extent_hooks = extent_hooks_set(tsd, arena, + new_extent_hooks); + READ(old_extent_hooks, extent_hooks_t *); + } else { + old_extent_hooks = extent_hooks_get(arena); + READ(old_extent_hooks, extent_hooks_t *); + } + } + } else { + ret = EFAULT; + goto label_return; + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + if (!opt_retain) { + /* Only relevant when retain is enabled. */ + return ENOENT; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + size_t old_limit, new_limit; + if (newp != NULL) { + WRITE(new_limit, size_t); + } + bool err = arena_retain_grow_limit_get_set(tsd, arena, + &old_limit, newp != NULL ? 
&new_limit : NULL); + if (!err) { + READ(old_limit, size_t); + ret = 0; + } else { + ret = EFAULT; + } + } else { + ret = EFAULT; + } +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + switch (i) { + case MALLCTL_ARENAS_ALL: + case MALLCTL_ARENAS_DESTROYED: + break; + default: + if (i > ctl_arenas->narenas) { + ret = NULL; + goto label_return; + } + break; + } + + ret = super_arena_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +/******************************************************************************/ + +static int +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned narenas; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + if (*oldlenp != sizeof(unsigned)) { + ret = EINVAL; + goto label_return; + } + narenas = ctl_arenas->narenas; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen, bool dirty) { + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : + arena_muzzy_decay_ms_default_get()); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return ret; +} + +static int +arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned) +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t) +static const ctl_named_node_t * +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { + if (i > SC_NBINS) { + return NULL; + } + return super_arenas_bin_i_node; +} + +CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]), + size_t) +static const ctl_named_node_t * +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { + if (i > SC_NSIZES - SC_NBINS) { + return NULL; + } + return super_arenas_lextent_i_node; +} + +static int +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void 
*oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + extent_hooks_t *extent_hooks; + unsigned arena_ind; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + + extent_hooks = (extent_hooks_t *)&extent_hooks_default; + WRITE(extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; + } + READ(arena_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + unsigned arena_ind; + void *ptr; + extent_t *extent; + arena_t *arena; + + ptr = NULL; + ret = EINVAL; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(ptr, void *); + extent = iealloc(tsd_tsdn(tsd), ptr); + if (extent == NULL) + goto label_return; + + arena = extent_arena_get(extent); + if (arena == NULL) + goto label_return; + + arena_ind = arena_ind_get(arena); + READ(arena_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +/******************************************************************************/ + +static int +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else { + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_active_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *filename = NULL; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(tsd, filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_gdump_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t lg_sample = lg_prof_sample; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) { + lg_sample = (sizeof(uint64_t) << 3) - 1; + } + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: 
+ return ret; +} + +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) + +static int +prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + const char *filename = NULL; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_log_start(tsd_tsdn(tsd), filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static int +prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + if (!config_prof) { + return ENOENT; + } + + if (prof_log_stop(tsd_tsdn(tsd))) { + return EFAULT; + } + + return 0; +} + +/******************************************************************************/ + +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) +CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) + +CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, + ctl_stats->background_thread.num_threads, size_t) +CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, + ctl_stats->background_thread.num_runs, uint64_t) +CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, + nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) +CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_uptime, + nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t) +CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail, + ATOMIC_RELAXED), + size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, + ctl_arena_stats_read_u64( + 
&arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_base, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_internal, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_resident, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm, + ATOMIC_RELAXED), size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, + arenas_i(mib[2])->astats->allocated_small, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, + arenas_i(mib[2])->astats->nmalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, + arenas_i(mib[2])->astats->ndalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, + arenas_i(mib[2])->astats->nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills, + arenas_i(mib[2])->astats->nfills_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes, + arenas_i(mib[2])->astats->nflushes_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t) +/* + * Note: "nmalloc_large" here instead of "nfills" in the read. This is + * intentional (large has no batch fill). + */ +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t) + +/* Lock profiling related APIs below. */ +#define RO_MUTEX_CTL_GEN(n, l) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ + l.n_lock_ops, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ + l.n_wait_times, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ + l.n_spin_acquired, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ + l.max_n_thds, uint32_t) + +/* Global mutexes. 
*/ +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes */ +#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* tcache bin mutex */ +RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, + arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +#undef RO_MUTEX_CTL_GEN + +/* Resets all mutex stats, including global, arena and bin mutexes. */ +static int +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + /* Global mutexes: ctl and prof. */ + MUTEX_PROF_RESET(ctl_mtx); + if (have_background_thread) { + MUTEX_PROF_RESET(background_thread_lock); + } + if (config_prof && opt_prof) { + MUTEX_PROF_RESET(bt2gctx_mtx); + } + + + /* Per arena mutexes. */ + unsigned n = narenas_total_get(); + + for (unsigned i = 0; i < n; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + MUTEX_PROF_RESET(arena->large_mtx); + MUTEX_PROF_RESET(arena->extent_avail_mtx); + MUTEX_PROF_RESET(arena->extents_dirty.mtx); + MUTEX_PROF_RESET(arena->extents_muzzy.mtx); + MUTEX_PROF_RESET(arena->extents_retained.mtx); + MUTEX_PROF_RESET(arena->decay_dirty.mtx); + MUTEX_PROF_RESET(arena->decay_muzzy.mtx); + MUTEX_PROF_RESET(arena->tcache_ql_mtx); + MUTEX_PROF_RESET(arena->base->mtx); + + for (szind_t i = 0; i < SC_NBINS; i++) { + for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { + bin_t *bin = &arena->bins[i].bin_shards[j]; + MUTEX_PROF_RESET(bin->lock); + } + } + } +#undef MUTEX_PROF_RESET + return 0; +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, + arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, + arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, + arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, + arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, + arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t) + +static const ctl_named_node_t * +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { + if (j > SC_NBINS) { + return NULL; + } + return super_stats_arenas_i_bins_j_node; +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) 
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, + arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) + +static const ctl_named_node_t * +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { + if (j > SC_NSIZES - SC_NBINS) { + return NULL; + } + return super_stats_arenas_i_lextents_j_node; +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].ndirty, + ATOMIC_RELAXED), size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, + ATOMIC_RELAXED), size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].nretained, + ATOMIC_RELAXED), size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, + ATOMIC_RELAXED), size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, + ATOMIC_RELAXED), size_t); +CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes, + atomic_load_zu( + &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, + ATOMIC_RELAXED), size_t); + +static const ctl_named_node_t * +stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t j) { + if (j >= SC_NPSIZES) { + return NULL; + } + return super_stats_arenas_i_extents_j_node; +} + +static bool +ctl_arenas_i_verify(size_t i) { + size_t a = arenas_i2a_impl(i, true, true); + if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { + return true; + } + + return false; +} + +static const ctl_named_node_t * +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (ctl_arenas_i_verify(i)) { + ret = NULL; + goto label_return; + } + + ret = super_stats_arenas_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +static int +experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + if (oldp == NULL || oldlenp == NULL|| newp == NULL) { + ret = EINVAL; + goto label_return; + } + /* + * Note: this is a *private* struct. This is an experimental interface; + * forcing the user to know the jemalloc internals well enough to + * extract the ABI hopefully ensures nobody gets too comfortable with + * this API, which can change at a moment's notice. 
+ */ + hooks_t hooks; + WRITE(hooks, hooks_t); + void *handle = hook_install(tsd_tsdn(tsd), &hooks); + if (handle == NULL) { + ret = EAGAIN; + goto label_return; + } + READ(handle, void *); + + ret = 0; +label_return: + return ret; +} + +static int +experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + WRITEONLY(); + void *handle = NULL; + WRITE(handle, void *); + if (handle == NULL) { + ret = EINVAL; + goto label_return; + } + hook_remove(tsd_tsdn(tsd), handle); + ret = 0; +label_return: + return ret; +} + +/* + * Output six memory utilization entries for an input pointer, the first one of + * type (void *) and the remaining five of type size_t, describing the following + * (in the same order): + * + * (a) memory address of the extent a potential reallocation would go into, + * == the five fields below describe about the extent the pointer resides in == + * (b) number of free regions in the extent, + * (c) number of regions in the extent, + * (d) size of the extent in terms of bytes, + * (e) total number of free regions in the bin the extent belongs to, and + * (f) total number of regions in the bin the extent belongs to. + * + * Note that "(e)" and "(f)" are only available when stats are enabled; + * otherwise their values are undefined. + * + * This API is mainly intended for small class allocations, where extents are + * used as slab. + * + * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)" + * will be zero (if stats are enabled; otherwise undefined). The other three + * fields will be properly set though the values are trivial: "(b)" will be 0, + * "(c)" will be 1, and "(d)" will be the usable size. + * + * The input pointer and size are respectively passed in by newp and newlen, + * and the output fields and size are respectively oldp and *oldlenp. + * + * It can be beneficial to define the following macros to make it easier to + * access the output: + * + * #define SLABCUR_READ(out) (*(void **)out) + * #define COUNTS(out) ((size_t *)((void **)out + 1)) + * #define NFREE_READ(out) COUNTS(out)[0] + * #define NREGS_READ(out) COUNTS(out)[1] + * #define SIZE_READ(out) COUNTS(out)[2] + * #define BIN_NFREE_READ(out) COUNTS(out)[3] + * #define BIN_NREGS_READ(out) COUNTS(out)[4] + * + * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test + * test_query in test/unit/extent_util.c for an example. + * + * For a typical defragmentation workflow making use of this API for + * understanding the fragmentation level, please refer to the comment for + * experimental_utilization_batch_query_ctl. + * + * It's up to the application how to determine the significance of + * fragmentation relying on the outputs returned. Possible choices are: + * + * (a) if extent utilization ratio is below certain threshold, + * (b) if extent memory consumption is above certain threshold, + * (c) if extent utilization ratio is significantly below bin utilization ratio, + * (d) if input pointer deviates a lot from potential reallocation address, or + * (e) some selection/combination of the above. + * + * The caller needs to make sure that the input/output arguments are valid, + * in particular, that the size of the output is correct, i.e.: + * + * *oldlenp = sizeof(void *) + sizeof(size_t) * 5 + * + * Otherwise, the function immediately returns EINVAL without touching anything. 
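+ *
+ * As a minimal sketch (assuming the public mallctl() entry point, a
+ * hypothetical live small allocation p, and that sizeof(size_t) <=
+ * sizeof(void *) so a void * array provides enough room and alignment),
+ * a single query using the reader macros defined above may look like:
+ *
+ *   void *buf[1 + 5];                 // 1 pointer slot + 5 count slots
+ *   size_t sz = sizeof(void *) + sizeof(size_t) * 5;  // exact size required
+ *   void *ptr = p;                    // the allocation to inspect
+ *   if (mallctl("experimental.utilization.query", buf, &sz, &ptr,
+ *       sizeof(ptr)) == 0) {
+ *       void *slabcur = SLABCUR_READ(buf);  // likely reallocation target
+ *       size_t nfree = NFREE_READ(buf);     // free regions in the extent
+ *       size_t nregs = NREGS_READ(buf);     // total regions in the extent
+ *   }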
+ * + * In the rare case where there's no associated extent found for the input + * pointer, the function zeros out all output fields and return. Please refer + * to the comment for experimental_utilization_batch_query_ctl to understand the + * motivation from C++. + */ +static int +experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + assert(sizeof(extent_util_stats_verbose_t) + == sizeof(void *) + sizeof(size_t) * 5); + + if (oldp == NULL || oldlenp == NULL + || *oldlenp != sizeof(extent_util_stats_verbose_t) + || newp == NULL) { + ret = EINVAL; + goto label_return; + } + + void *ptr = NULL; + WRITE(ptr, void *); + extent_util_stats_verbose_t *util_stats + = (extent_util_stats_verbose_t *)oldp; + extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr, + &util_stats->nfree, &util_stats->nregs, &util_stats->size, + &util_stats->bin_nfree, &util_stats->bin_nregs, + &util_stats->slabcur_addr); + ret = 0; + +label_return: + return ret; +} + +/* + * Given an input array of pointers, output three memory utilization entries of + * type size_t for each input pointer about the extent it resides in: + * + * (a) number of free regions in the extent, + * (b) number of regions in the extent, and + * (c) size of the extent in terms of bytes. + * + * This API is mainly intended for small class allocations, where extents are + * used as slab. In case of large class allocations, the outputs are trivial: + * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size. + * + * Note that multiple input pointers may reside on a same extent so the output + * fields may contain duplicates. + * + * The format of the input/output looks like: + * + * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions + * | output[1]: 1st_extent_n_regions + * | output[2]: 1st_extent_size + * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions + * | output[4]: 2nd_extent_n_regions + * | output[5]: 2nd_extent_size + * ... | ... + * + * The input array and size are respectively passed in by newp and newlen, and + * the output array and size are respectively oldp and *oldlenp. + * + * It can be beneficial to define the following macros to make it easier to + * access the output: + * + * #define NFREE_READ(out, i) out[(i) * 3] + * #define NREGS_READ(out, i) out[(i) * 3 + 1] + * #define SIZE_READ(out, i) out[(i) * 3 + 2] + * + * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit + * test test_batch in test/unit/extent_util.c for a concrete example. + * + * A typical workflow would be composed of the following steps: + * + * (1) flush tcache: mallctl("thread.tcache.flush", ...) + * (2) initialize input array of pointers to query fragmentation + * (3) allocate output array to hold utilization statistics + * (4) query utilization: mallctl("experimental.utilization.batch_query", ...) + * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here + * (6) disable tcache: mallctl("thread.tcache.enabled", ...) + * (7) defragment allocations with significant fragmentation, e.g.: + * for each allocation { + * if it's fragmented { + * malloc(...); + * memcpy(...); + * free(...); + * } + * } + * (8) enable tcache: mallctl("thread.tcache.enabled", ...) + * + * The application can determine the significance of fragmentation themselves + * relying on the statistics returned, both at the overall level i.e. step "(5)" + * and at individual allocation level i.e. 
within step "(7)". Possible choices + * are: + * + * (a) whether memory utilization ratio is below certain threshold, + * (b) whether memory consumption is above certain threshold, or + * (c) some combination of the two. + * + * The caller needs to make sure that the input/output arrays are valid and + * their sizes are proper as well as matched, meaning: + * + * (a) newlen = n_pointers * sizeof(const void *) + * (b) *oldlenp = n_pointers * sizeof(size_t) * 3 + * (c) n_pointers > 0 + * + * Otherwise, the function immediately returns EINVAL without touching anything. + * + * In the rare case where there's no associated extent found for some pointers, + * rather than immediately terminating the computation and raising an error, + * the function simply zeros out the corresponding output fields and continues + * the computation until all input pointers are handled. The motivations of + * such a design are as follows: + * + * (a) The function always either processes nothing or processes everything, and + * never leaves the output half touched and half untouched. + * + * (b) It facilitates usage needs especially common in C++. A vast variety of + * C++ objects are instantiated with multiple dynamic memory allocations. For + * example, std::string and std::vector typically use at least two allocations, + * one for the metadata and one for the actual content. Other types may use + * even more allocations. When inquiring about utilization statistics, the + * caller often wants to examine into all such allocations, especially internal + * one(s), rather than just the topmost one. The issue comes when some + * implementations do certain optimizations to reduce/aggregate some internal + * allocations, e.g. putting short strings directly into the metadata, and such + * decisions are not known to the caller. Therefore, we permit pointers to + * memory usages that may not be returned by previous malloc calls, and we + * provide the caller a convenient way to identify such cases. 
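+ *
+ * A compact sketch of steps (3) and (4) above (illustrative only; "ptrs" is
+ * an array of n pointers previously returned by malloc, and error handling
+ * is omitted):
+ *
+ *   size_t out_sz = n * sizeof(size_t) * 3;
+ *   size_t *out = (size_t *)malloc(out_sz);
+ *   mallctl("experimental.utilization.batch_query", out, &out_sz,
+ *       ptrs, n * sizeof(const void *));
+ *   NFREE_READ(out, i) / NREGS_READ(out, i) then feed the decision in (5).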
+ */ +static int +experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3); + + const size_t len = newlen / sizeof(const void *); + if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0 + || newlen != len * sizeof(const void *) + || *oldlenp != len * sizeof(extent_util_stats_t)) { + ret = EINVAL; + goto label_return; + } + + void **ptrs = (void **)newp; + extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp; + size_t i; + for (i = 0; i < len; ++i) { + extent_util_stats_get(tsd_tsdn(tsd), ptrs[i], + &util_stats[i].nfree, &util_stats[i].nregs, + &util_stats[i].size); + } + ret = 0; + +label_return: + return ret; +} + +static const ctl_named_node_t * +experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib, + size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (ctl_arenas_i_verify(i)) { + ret = NULL; + goto label_return; + } + ret = super_experimental_arenas_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +static int +experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) { + return EINVAL; + } + + unsigned arena_ind; + arena_t *arena; + int ret; + size_t *pactivep; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + MIB_UNSIGNED(arena_ind, 2); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \ + defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER) + /* Expose the underlying counter for fast read. */ + pactivep = (size_t *)&(arena->nactive.repr); + READ(pactivep, size_t *); + ret = 0; +#else + ret = EFAULT; +#endif + } else { + ret = EFAULT; + } +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} diff --git a/deps/jemalloc/src/div.c b/deps/jemalloc/src/div.c new file mode 100644 index 0000000..808892a --- /dev/null +++ b/deps/jemalloc/src/div.c @@ -0,0 +1,55 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/div.h" + +#include "jemalloc/internal/assert.h" + +/* + * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d. + * + * For any k, we have (here, all division is exact; not C-style rounding): + * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where + * r = (-2^k) mod d. + * + * Expanding this out: + * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k) + * = floor(n / d + (r / d) * (n / 2^k)). + * + * The fractional part of n / d is 0 (because of the assumption that d divides n + * exactly), so we have: + * ... = n / d + floor((r / d) * (n / 2^k)) + * + * So that our initial expression is equal to the quantity we seek, so long as + * (r / d) * (n / 2^k) < 1. + * + * r is a remainder mod d, so r < d and r / d < 1 always. We can make + * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works. + */ + +void +div_init(div_info_t *div_info, size_t d) { + /* Nonsensical. */ + assert(d != 0); + /* + * This would make the value of magic too high to fit into a uint32_t + * (we would want magic = 2^32 exactly). This would mess with code gen + * on 32-bit machines. 
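+ *
+ * As a worked illustration of the scheme above (independent of this
+ * particular assertion): for d = 3 and k = 32,
+ * magic = ceil(2^32 / 3) = 0x55555556, and for any n that is a multiple of
+ * 3, (n * (uint64_t)magic) >> 32 recovers n / 3 exactly, e.g.
+ * (9 * 0x55555556) >> 32 = 3.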
+ */ + assert(d != 1); + + uint64_t two_to_k = ((uint64_t)1 << 32); + uint32_t magic = (uint32_t)(two_to_k / d); + + /* + * We want magic = ceil(2^k / d), but C gives us floor. We have to + * increment it unless the result was exact (i.e. unless d is a power of + * two). + */ + if (two_to_k % d != 0) { + magic++; + } + div_info->magic = magic; +#ifdef JEMALLOC_DEBUG + div_info->d = d; +#endif +} diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c new file mode 100644 index 0000000..9237f90 --- /dev/null +++ b/deps/jemalloc/src/extent.c @@ -0,0 +1,2403 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +/******************************************************************************/ +/* Data. */ + +rtree_t extents_rtree; +/* Keyed by the address of the extent_t being protected. */ +mutex_pool_t extent_mutex_pool; + +size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; + +static const bitmap_info_t extents_bitmap_info = + BITMAP_INFO_INITIALIZER(SC_NPSIZES+1); + +static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_decommit_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#ifdef PAGES_CAN_PURGE_LAZY +static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef PAGES_CAN_PURGE_FORCED +static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained); +static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); 
+static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained); + +const extent_hooks_t extent_hooks_default = { + extent_alloc_default, + extent_dalloc_default, + extent_destroy_default, + extent_commit_default, + extent_decommit_default +#ifdef PAGES_CAN_PURGE_LAZY + , + extent_purge_lazy_default +#else + , + NULL +#endif +#ifdef PAGES_CAN_PURGE_FORCED + , + extent_purge_forced_default +#else + , + NULL +#endif + , + extent_split_default, + extent_merge_default +}; + +/* Used exclusively for gdump triggering. */ +static atomic_zu_t curpages; +static atomic_zu_t highpages; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void extent_deregister(tsdn_t *tsdn, extent_t *extent); +static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit, bool growing_retained); +static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained); +static void extent_record(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, + bool growing_retained); + +/******************************************************************************/ + +#define ATTR_NONE /* does nothing */ + +ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link, + extent_esnead_comp) + +#undef ATTR_NONE + +typedef enum { + lock_result_success, + lock_result_failure, + lock_result_no_extent +} lock_result_t; + +static lock_result_t +extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, + extent_t **result, bool inactive_only) { + extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, + elm, true); + + /* Slab implies active extents and should be skipped. */ + if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn, + &extents_rtree, elm, true))) { + return lock_result_no_extent; + } + + /* + * It's possible that the extent changed out from under us, and with it + * the leaf->extent mapping. We have to recheck while holding the lock. + */ + extent_lock(tsdn, extent1); + extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, + &extents_rtree, elm, true); + + if (extent1 == extent2) { + *result = extent1; + return lock_result_success; + } else { + extent_unlock(tsdn, extent1); + return lock_result_failure; + } +} + +/* + * Returns a pool-locked extent_t * if there's one associated with the given + * address, and NULL otherwise. 
+ */ +static extent_t * +extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr, + bool inactive_only) { + extent_t *ret = NULL; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)addr, false, false); + if (elm == NULL) { + return NULL; + } + lock_result_t lock_result; + do { + lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret, + inactive_only); + } while (lock_result == lock_result_failure); + return ret; +} + +extent_t * +extent_alloc(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_t *extent = extent_avail_first(&arena->extent_avail); + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return base_alloc_extent(tsdn, arena->base); + } + extent_avail_remove(&arena->extent_avail, extent); + atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return extent; +} + +void +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_avail_insert(&arena->extent_avail, extent); + atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); +} + +extent_hooks_t * +extent_hooks_get(arena_t *arena) { + return base_extent_hooks_get(arena->base); +} + +extent_hooks_t * +extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + +static void +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t **r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { + *r_extent_hooks = extent_hooks_get(arena); + } +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; + + assert(size > 0); + assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; + } + return ret; +} + +/* Generate pairing heap functions. 
*/ +ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) + +bool +extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < SC_NPSIZES + 1; i++) { + extent_heap_new(&extents->heaps[i]); + } + bitmap_init(extents->bitmap, &extents_bitmap_info, true); + extent_list_init(&extents->lru); + atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); + extents->state = state; + extents->delay_coalesce = delay_coalesce; + return false; +} + +extent_state_t +extents_state_get(const extents_t *extents) { + return extents->state; +} + +size_t +extents_npages_get(extents_t *extents) { + return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); +} + +size_t +extents_nextents_get(extents_t *extents, pszind_t pind) { + return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED); +} + +size_t +extents_nbytes_get(extents_t *extents, pszind_t pind) { + return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED); +} + +static void +extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); + atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED); + cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); + atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED); +} + +static void +extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) { + size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED); + atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED); + cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED); + atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED); +} + +static void +extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_unset(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&extents->heaps[pind], extent); + + if (config_stats) { + extents_stats_add(extents, pind, size); + } + + extent_list_append(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. 
+ */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + atomic_store_zu(&extents->npages, cur_extents_npages + npages, + ATOMIC_RELAXED); +} + +static void +extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&extents->heaps[pind], extent); + + if (config_stats) { + extents_stats_sub(extents, pind, size); + } + + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_set(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_list_remove(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * As in extents_insert_locked, we hold extents->mtx and so don't need + * atomic operations for updating extents->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&extents->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); +} + +/* + * Find an extent with size [min_size, max_size) to satisfy the alignment + * requirement. For each size, try only the first extent in the heap. + */ +static extent_t * +extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, + size_t alignment) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); + pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); + + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < pind_max; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(i < SC_NPSIZES); + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + uintptr_t base = (uintptr_t)extent_base_get(extent); + size_t candidate_size = extent_size_get(extent); + assert(candidate_size >= min_size); + + uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, + PAGE_CEILING(alignment)); + if (base > next_align || base + candidate_size <= next_align) { + /* Overflow or not crossing the next alignment. */ + continue; + } + + size_t leadsize = next_align - base; + if (candidate_size - leadsize >= min_size) { + return extent; + } + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + */ +static extent_t * +extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + extent_t *ret = NULL; + + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + + if (!maps_coalesce && !opt_retain) { + /* + * No split / merge allowed (Windows w/o retain). Try exact fit + * only. + */ + return extent_heap_empty(&extents->heaps[pind]) ? NULL : + extent_heap_first(&extents->heaps[pind]); + } + + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); + i < SC_NPSIZES + 1; + i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + /* + * In order to reduce fragmentation, avoid reusing and splitting + * large extents for much smaller sizes. + * + * Only do check for dirty extents (delay_coalesce). 
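+ *
+ * Concretely (restating the check below): the search stops at size class s
+ * once s >> opt_lg_extent_max_active_fit > size, i.e. once the candidate is
+ * more than 2^opt_lg_extent_max_active_fit times the requested size.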
+ */ + if (extents->delay_coalesce && + (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { + break; + } + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + ret = extent; + } + if (i == SC_NPSIZES) { + break; + } + assert(i < SC_NPSIZES); + } + + return ret; +} + +/* + * Do first-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. + */ +static extent_t * +extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t esize, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + + size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (max_size < esize) { + return NULL; + } + + extent_t *extent = + extents_first_fit_locked(tsdn, arena, extents, max_size); + + if (alignment > PAGE && extent == NULL) { + /* + * max_size guarantees the alignment requirement but is rather + * pessimistic. Next we try to satisfy the aligned allocation + * with sizes in [esize, max_size). + */ + extent = extents_fit_alignment(extents, esize, max_size, + alignment); + } + + return extent; +} + +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent) { + extent_state_set(extent, extent_state_active); + bool coalesced; + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, &coalesced, false); + extent_state_set(extent, extents_state_get(extents)); + + if (!coalesced) { + return true; + } + extents_insert_locked(tsdn, extents, extent); + return false; +} + +extent_t * +extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size + pad != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, + new_addr, size, pad, alignment, slab, szind, zero, commit, false); + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +void +extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + extent_zeroed_set(extent, false); + + extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); +} + +extent_t * +extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, size_t npages_min) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + malloc_mutex_lock(tsdn, &extents->mtx); + + /* + * Get the LRU coalesced extent, if any. If coalescing was delayed, + * the loop will iterate until the LRU extent is fully coalesced. + */ + extent_t *extent; + while (true) { + /* Get the LRU extent, if any. */ + extent = extent_list_first(&extents->lru); + if (extent == NULL) { + goto label_return; + } + /* Check the eviction limit. 
*/ + size_t extents_npages = atomic_load_zu(&extents->npages, + ATOMIC_RELAXED); + if (extents_npages <= npages_min) { + extent = NULL; + goto label_return; + } + extents_remove_locked(tsdn, extents, extent); + if (!extents->delay_coalesce) { + break; + } + /* Try to coalesce. */ + if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent)) { + break; + } + /* + * The LRU extent was just coalesced and the result placed in + * the LRU at its neighbor's position. Start over. + */ + } + + /* + * Either mark the extent active or deregister it to protect against + * concurrent operations. + */ + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + case extent_state_muzzy: + extent_state_set(extent, extent_state_active); + break; + case extent_state_retained: + extent_deregister(tsdn, extent); + break; + default: + not_reached(); + } + +label_return: + malloc_mutex_unlock(tsdn, &extents->mtx); + return extent; +} + +/* + * This can only happen when we fail to allocate a new extent struct (which + * indicates OOM), e.g. when trying to split an existing extent. + */ +static void +extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + size_t sz = extent_size_get(extent); + if (config_stats) { + arena_stats_accum_zu(&arena->stats.abandoned_vm, sz); + } + /* + * Leak extent after making sure its pages have already been purged, so + * that this is only a virtual memory leak. + */ + if (extents_state_get(extents) == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, + extent, 0, sz, growing_retained)) { + extent_purge_forced_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), + growing_retained); + } + } + extent_dalloc(tsdn, arena, extent); +} + +void +extents_prefork(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_prefork(tsdn, &extents->mtx); +} + +void +extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_parent(tsdn, &extents->mtx); +} + +void +extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_child(tsdn, &extents->mtx); +} + +static void +extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extent_state_active); + + extent_state_set(extent, extents_state_get(extents)); + extents_insert_locked(tsdn, extents, extent); +} + +static void +extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + malloc_mutex_lock(tsdn, &extents->mtx); + extent_deactivate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +static void +extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extents_state_get(extents)); + + extents_remove_locked(tsdn, extents, extent); + extent_state_set(extent, extent_state_active); +} + +static bool +extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + const extent_t *extent, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + 
assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_last_get(extent), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, + slab); + } +} + +static void +extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, + szind_t szind) { + assert(extent_slab_get(extent)); + + /* Register interior. */ + for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_write(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE), extent, szind, true); + } +} + +static void +extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nadd = extent_size_get(extent) >> LG_PAGE; + size_t cur = atomic_fetch_add_zu(&curpages, nadd, + ATOMIC_RELAXED) + nadd; + size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); + while (cur > high && !atomic_compare_exchange_weak_zu( + &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highpages update race. + * Note that high is updated in case of CAS failure. + */ + } + if (cur > high && prof_gdump_get_unlocked()) { + prof_gdump(tsdn); + } + } +} + +static void +extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nsub = extent_size_get(extent) >> LG_PAGE; + assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); + atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); + } +} + +static bool +extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + + /* + * We need to hold the lock to protect against a concurrent coalesce + * operation that sees us in a partial state. 
+ */ + extent_lock(tsdn, extent); + + if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, + &elm_a, &elm_b)) { + extent_unlock(tsdn, extent); + return true; + } + + szind_t szind = extent_szind_get_maybe_invalid(extent); + bool slab = extent_slab_get(extent); + extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); + if (slab) { + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump_add) { + extent_gdump_add(tsdn, extent); + } + + return false; +} + +static bool +extent_register(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, true); +} + +static bool +extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, false); +} + +static void +extent_reregister(tsdn_t *tsdn, extent_t *extent) { + bool err = extent_register(tsdn, extent); + assert(!err); +} + +/* + * Removes all pointers to the given extent from the global rtree indices for + * its interior. This is relevant for slab extents, for which we need to do + * metadata lookups at places other than the head of the extent. We deregister + * on the interior, then, when an extent moves from being an active slab to an + * inactive state. + */ +static void +extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + extent_t *extent) { + size_t i; + + assert(extent_slab_get(extent)); + + for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_clear(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE)); + } +} + +/* + * Removes all pointers to the given extent from the global rtree. + */ +static void +extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, + &elm_a, &elm_b); + + extent_lock(tsdn, extent); + + extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump) { + extent_gdump_sub(tsdn, extent); + } +} + +static void +extent_deregister(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, true); +} + +static void +extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, false); +} + +/* + * Tries to find and remove an extent from extents that can be used for the + * given allocation request. + */ +static extent_t * +extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(alignment > 0); + if (config_debug && new_addr != NULL) { + /* + * Non-NULL new_addr has two use cases: + * + * 1) Recycle a known-extant extent, e.g. during purging. + * 2) Perform in-place expanding reallocation. + * + * Regardless of use case, new_addr must either refer to a + * non-existing extent, or to the base of an extant extent, + * since only active slabs support interior lookups (which of + * course cannot be recycled). 
+ */ + assert(PAGE_ADDR2BASE(new_addr) == new_addr); + assert(pad == 0); + assert(alignment <= PAGE); + } + + size_t esize = size + pad; + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + extent_t *extent; + if (new_addr != NULL) { + extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr, + false); + if (extent != NULL) { + /* + * We might null-out extent to report an error, but we + * still need to unlock the associated mutex after. + */ + extent_t *unlock_extent = extent; + assert(extent_base_get(extent) == new_addr); + if (extent_arena_get(extent) != arena || + extent_size_get(extent) < esize || + extent_state_get(extent) != + extents_state_get(extents)) { + extent = NULL; + } + extent_unlock(tsdn, unlock_extent); + } + } else { + extent = extents_fit_locked(tsdn, arena, extents, esize, + alignment); + } + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &extents->mtx); + return NULL; + } + + extent_activate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); + + return extent; +} + +/* + * Given an allocation request and an extent guaranteed to be able to satisfy + * it, this splits off lead and trail extents, leaving extent pointing to an + * extent satisfying the allocation. + * This function doesn't put lead or trail into any extents_t; it's the caller's + * job to ensure that they can be reused. + */ +typedef enum { + /* + * Split successfully. lead, extent, and trail, are modified to extents + * describing the ranges before, in, and after the given allocation. + */ + extent_split_interior_ok, + /* + * The extent can't satisfy the given allocation request. None of the + * input extent_t *s are touched. + */ + extent_split_interior_cant_alloc, + /* + * In a potentially invalid state. Must leak (if *to_leak is non-NULL), + * and salvage what's still salvageable (if *to_salvage is non-NULL). + * None of lead, extent, or trail are valid. + */ + extent_split_interior_error +} extent_split_interior_result_t; + +static extent_split_interior_result_t +extent_split_interior(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, + /* The result of splitting, in case of success. */ + extent_t **extent, extent_t **lead, extent_t **trail, + /* The mess to clean up, in case of error. */ + extent_t **to_leak, extent_t **to_salvage, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, bool growing_retained) { + size_t esize = size + pad; + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), + PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); + assert(new_addr == NULL || leadsize == 0); + if (extent_size_get(*extent) < leadsize + esize) { + return extent_split_interior_cant_alloc; + } + size_t trailsize = extent_size_get(*extent) - leadsize - esize; + + *lead = NULL; + *trail = NULL; + *to_leak = NULL; + *to_salvage = NULL; + + /* Split the lead. */ + if (leadsize != 0) { + *lead = *extent; + *extent = extent_split_impl(tsdn, arena, r_extent_hooks, + *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind, + slab, growing_retained); + if (*extent == NULL) { + *to_leak = *lead; + *lead = NULL; + return extent_split_interior_error; + } + } + + /* Split the trail. 
*/ + if (trailsize != 0) { + *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, + esize, szind, slab, trailsize, SC_NSIZES, false, + growing_retained); + if (*trail == NULL) { + *to_leak = *extent; + *to_salvage = *lead; + *lead = NULL; + *extent = NULL; + return extent_split_interior_error; + } + } + + if (leadsize == 0 && trailsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + extent_szind_set(*extent, szind); + if (szind != SC_NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(*extent), szind, slab); + if (slab && extent_size_get(*extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(*extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + return extent_split_interior_ok; +} + +/* + * This fulfills the indicated allocation request out of the given extent (which + * the caller should have ensured was big enough). If there's any unused space + * before or after the resulting allocation, that space is given its own extent + * and put back into extents. + */ +static extent_t * +extent_recycle_split(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, extent_t *extent, bool growing_retained) { + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, + growing_retained); + + if (!maps_coalesce && result != extent_split_interior_ok + && !opt_retain) { + /* + * Split isn't supported (implies Windows w/o retain). Avoid + * leaking the extents. + */ + assert(to_leak != NULL && lead == NULL && trail == NULL); + extent_deactivate(tsdn, arena, extents, to_leak); + return NULL; + } + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_deactivate(tsdn, arena, extents, lead); + } + if (trail != NULL) { + extent_deactivate(tsdn, arena, extents, trail); + } + return extent; + } else { + /* + * We should have picked an extent that was large enough to + * fulfill our allocation request. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + extent_deregister(tsdn, to_salvage); + } + if (to_leak != NULL) { + void *leak = extent_base_get(to_leak); + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_abandon_vm(tsdn, arena, r_extent_hooks, extents, + to_leak, growing_retained); + assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, + false) == NULL); + } + return NULL; + } + unreachable(); +} + +static bool +extent_need_manual_zero(arena_t *arena) { + /* + * Need to manually zero the extent on repopulating if either; 1) non + * default extent hooks installed (in which case the purge semantics may + * change); or 2) transparent huge pages enabled. + */ + return (!arena_has_default_hooks(arena) || + (opt_thp == thp_mode_always)); +} + +/* + * Tries to satisfy the given allocation request by reusing one of the extents + * in the given extents_t. 
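+ * The stages are visible below: extent_recycle_extract() pulls a candidate
+ * out of the extents_t, extent_recycle_split() trims it to the request, and
+ * the remainder commits and (when requested) zeroes the result, backing out
+ * via extent_record() if committing fails.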
+ */ +static extent_t * +extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(new_addr == NULL || !slab); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, new_addr, size, pad, alignment, slab, + growing_retained); + if (extent == NULL) { + return NULL; + } + + extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, new_addr, size, pad, alignment, slab, szind, extent, + growing_retained); + if (extent == NULL) { + return NULL; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent), growing_retained)) { + extent_record(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + if (!extent_need_manual_zero(arena)) { + extent_zeroed_set(extent, true); + } + } + + if (extent_committed_get(extent)) { + *commit = true; + } + if (extent_zeroed_get(extent)) { + *zero = true; + } + + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + assert(extent_state_get(extent) == extent_state_active); + if (slab) { + extent_slab_set(extent, slab); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + if (*zero) { + void *addr = extent_base_get(extent); + if (!extent_zeroed_get(extent)) { + size_t size = extent_size_get(extent); + if (extent_need_manual_zero(arena) || + pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } else if (config_debug) { + size_t *p = (size_t *)(uintptr_t)addr; + /* Check the first page only. */ + for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { + assert(p[i] == 0); + } + } + } + return extent; +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. 
*/ + return NULL; +} + +static void * +extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { + void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, + ATOMIC_RELAXED)); + if (have_madvise_huge && ret) { + pages_set_thp_state(ret, size); + } + return ret; +} + +static void * +extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. + */ + assert(arena != NULL); + + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + ALIGNMENT_CEILING(alignment, PAGE), zero, commit); +} + +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { + /* + * The only legitimate case of customized extent hooks for a0 is + * hooks with no allocation activities. One such example is to + * place metadata on pre-allocated resources such as huge pages. + * In that case, rely on reentrancy_level checks to catch + * infinite recursions. + */ + pre_reentrancy(tsd, NULL); + } else { + pre_reentrancy(tsd, arena); + } +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + +/* + * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each arena. + */ +static extent_t * +extent_grow_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, + bool slab, szind_t szind, bool *zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + size_t esize = size + pad; + size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size_min < esize) { + goto label_err; + } + /* + * Find the next extent size in the series that would be large enough to + * satisfy this request. + */ + pszind_t egn_skip = 0; + size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + while (alloc_size < alloc_size_min) { + egn_skip++; + if (arena->extent_grow_next + egn_skip >= + sz_psz2ind(SC_LARGE_MAXCLASS)) { + /* Outside legal range. 
*/ + goto label_err; + } + alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + } + + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + goto label_err; + } + bool zeroed = false; + bool committed = false; + + void *ptr; + if (*r_extent_hooks == &extent_hooks_default) { + ptr = extent_alloc_default_impl(tsdn, arena, NULL, + alloc_size, PAGE, &zeroed, &committed); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, + alloc_size, PAGE, &zeroed, &committed, + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES, + arena_extent_sn_next(arena), extent_state_active, zeroed, + committed, true, EXTENT_IS_HEAD); + if (ptr == NULL) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + + if (extent_register_no_gdump_add(tsdn, extent)) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + + if (extent_zeroed_get(extent) && extent_committed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, + true); + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + } + if (trail != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, trail, true); + } + } else { + /* + * We should have allocated a sufficiently large extent; the + * cant_alloc case should not occur. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + if (config_prof) { + extent_gdump_add(tsdn, to_salvage); + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_salvage, true); + } + if (to_leak != NULL) { + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_abandon_vm(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_leak, true); + } + goto label_err; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, + extent_size_get(extent), true)) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + if (!extent_need_manual_zero(arena)) { + extent_zeroed_set(extent, true); + } + } + + /* + * Increment extent_grow_next if doing so wouldn't exceed the allowed + * range. + */ + if (arena->extent_grow_next + egn_skip + 1 <= + arena->retain_grow_limit) { + arena->extent_grow_next += egn_skip + 1; + } else { + arena->extent_grow_next = arena->retain_grow_limit; + } + /* All opportunities for failure are past. */ + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + + if (config_prof) { + /* Adjust gdump stats now that extent is final size. 
*/ + extent_gdump_add(tsdn, extent); + } + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (slab) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_slab_set(extent, true); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + if (*zero && !extent_zeroed_get(extent)) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (extent_need_manual_zero(arena) || + pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } + + return extent; +label_err: + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + return NULL; +} + +static extent_t * +extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size != 0); + assert(alignment != 0); + + malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, + &arena->extents_retained, new_addr, size, pad, alignment, slab, + szind, zero, commit, true); + if (extent != NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + if (config_prof) { + extent_gdump_add(tsdn, extent); + } + } else if (opt_retain && new_addr == NULL) { + extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, + pad, alignment, slab, szind, zero, commit); + /* extent_grow_retained() always releases extent_grow_mtx. */ + } else { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + } + malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); + + return extent; +} + +static extent_t * +extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + size_t esize = size + pad; + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + return NULL; + } + void *addr; + size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
*/ + addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, + palignment, zero, commit); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, + esize, palignment, zero, commit, arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + if (addr == NULL) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + extent_init(extent, arena, addr, esize, slab, szind, + arena_extent_sn_next(arena), extent_state_active, *zero, *commit, + true, EXTENT_NOT_HEAD); + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (extent_register(tsdn, extent)) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + + return extent; +} + +extent_t * +extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + if (extent == NULL) { + if (opt_retain && new_addr != NULL) { + /* + * When retain is enabled and new_addr is set, we do not + * attempt extent_alloc_wrapper_hard which does mmap + * that is very unlikely to succeed (unless it happens + * to be at the end). + */ + return NULL; + } + extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + } + + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +static bool +extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, + const extent_t *outer) { + assert(extent_arena_get(inner) == arena); + if (extent_arena_get(outer) != arena) { + return false; + } + + assert(extent_state_get(inner) == extent_state_active); + if (extent_state_get(outer) != extents->state) { + return false; + } + + if (extent_committed_get(inner) != extent_committed_get(outer)) { + return false; + } + + return true; +} + +static bool +extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *inner, extent_t *outer, bool forward, + bool growing_retained) { + assert(extent_can_coalesce(arena, extents, inner, outer)); + + extent_activate_locked(tsdn, arena, extents, outer); + + malloc_mutex_unlock(tsdn, &extents->mtx); + bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, + forward ? inner : outer, forward ? outer : inner, growing_retained); + malloc_mutex_lock(tsdn, &extents->mtx); + + if (err) { + extent_deactivate_locked(tsdn, arena, extents, outer); + } + + return err; +} + +static extent_t * +extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained, + bool inactive_only) { + /* + * We avoid checking / locking inactive neighbors for large size + * classes, since they are eagerly coalesced on deallocation which can + * cause lock contention. + */ + /* + * Continue attempting to coalesce until failure, to protect against + * races with other threads that are thwarted by this one. + */ + bool again; + do { + again = false; + + /* Try to coalesce forward. 
*/ + extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, + extent_past_get(extent), inactive_only); + if (next != NULL) { + /* + * extents->mtx only protects against races for + * like-state extents, so call extent_can_coalesce() + * before releasing next's pool lock. + */ + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, next); + + extent_unlock(tsdn, next); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, next, true, + growing_retained)) { + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + + /* Try to coalesce backward. */ + extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, + extent_before_get(extent), inactive_only); + if (prev != NULL) { + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, prev); + extent_unlock(tsdn, prev); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, prev, false, + growing_retained)) { + extent = prev; + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + } while (again); + + if (extents->delay_coalesce) { + *coalesced = false; + } + return extent; +} + +static extent_t * +extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { + return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, coalesced, growing_retained, false); +} + +static extent_t * +extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { + return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, coalesced, growing_retained, true); +} + +/* + * Does the metadata management portions of putting an unused extent into the + * given extents_t (coalesces, deregisters slab interiors, the heap operations). + */ +static void +extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + assert((extents_state_get(extents) != extent_state_dirty && + extents_state_get(extents) != extent_state_muzzy) || + !extent_zeroed_get(extent)); + + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_szind_set(extent, SC_NSIZES); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), true) == extent); + + if (!extents->delay_coalesce) { + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent, NULL, growing_retained); + } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) { + assert(extents == &arena->extents_dirty); + /* Always coalesce large extents eagerly. 
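Even though dirty extents otherwise use delayed coalescing, large extents are merged immediately so the oversize_threshold purge shortcut below sees the full contiguous size.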
*/ + bool coalesced; + do { + assert(extent_state_get(extent) == extent_state_active); + extent = extent_try_coalesce_large(tsdn, arena, + r_extent_hooks, rtree_ctx, extents, extent, + &coalesced, growing_retained); + } while (coalesced); + if (extent_size_get(extent) >= oversize_threshold) { + /* Shortcut to purge the oversize extent eagerly. */ + malloc_mutex_unlock(tsdn, &extents->mtx); + arena_decay_extent(tsdn, arena, r_extent_hooks, extent); + return; + } + } + extent_deactivate_locked(tsdn, arena, extents, extent); + + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +void +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (extent_register(tsdn, extent)) { + extent_dalloc(tsdn, arena, extent); + return; + } + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); +} + +static bool +extent_may_dalloc(void) { + /* With retain enabled, the default dalloc always fails. */ + return !opt_retain; +} + +static bool +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return extent_dalloc_default_impl(addr, size); +} + +static bool +extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + bool err; + + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to deallocate. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_dalloc_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = ((*r_extent_hooks)->dalloc == NULL || + (*r_extent_hooks)->dalloc(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena))); + extent_hook_post_reentrancy(tsdn); + } + + if (!err) { + extent_dalloc(tsdn, arena, extent); + } + + return err; +} + +void +extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Avoid calling the default extent_dalloc unless have to. */ + if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) { + /* + * Deregister first to avoid a race with other allocating + * threads, and reregister if deallocation fails. + */ + extent_deregister(tsdn, extent); + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, + extent)) { + return; + } + extent_reregister(tsdn, extent); + } + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + /* Try to decommit; purge if that fails. 
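The fallback order below is decommit, then forced purge, then lazy purge; only decommit and forced purge leave the extent known to be zeroed.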
*/ + bool zeroed; + if (!extent_committed_get(extent)) { + zeroed = true; + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { + zeroed = true; + } else if ((*r_extent_hooks)->purge_forced != NULL && + !(*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena))) { + zeroed = true; + } else if (extent_state_get(extent) == extent_state_muzzy || + ((*r_extent_hooks)->purge_lazy != NULL && + !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena)))) { + zeroed = false; + } else { + zeroed = false; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_zeroed_set(extent, zeroed); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } + + extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, + extent, false); +} + +static void +extent_destroy_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + extent_destroy_default_impl(addr, size); +} + +void +extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Deregister first to avoid a race with other allocating threads. */ + extent_deregister(tsdn, extent); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to destroy; silently fail otherwise. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + extent_destroy_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else if ((*r_extent_hooks)->destroy != NULL) { + extent_hook_pre_reentrancy(tsdn, arena); + (*r_extent_hooks)->destroy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_dalloc(tsdn, arena, extent); +} + +static bool +extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->commit == NULL || + (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), + extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) || !err); + return err; +} + +bool +extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, + length, false); +} + +static bool +extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +bool +extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->decommit == NULL || + (*r_extent_hooks)->decommit(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) && err); + return err; +} + +#ifdef PAGES_CAN_PURGE_LAZY +static bool +extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} +#endif + +static bool +extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_lazy == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + + return err; +} + +bool +extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef PAGES_CAN_PURGE_FORCED +static bool +extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} +#endif + +static bool +extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_forced == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + return err; +} + +bool +extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +static bool +extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + if (!maps_coalesce) { + /* + * Without retain, only whole regions can be purged (required by + * MEM_RELEASE on Windows) -- therefore disallow splitting. See + * comments in extent_head_no_merge(). + */ + return !opt_retain; + } + + return false; +} + +/* + * Accepts the extent to split, and the characteristics of each side of the + * split. The 'a' parameters go with the 'lead' of the resulting pair of + * extents (the lower addressed portion of the split), and the 'b' parameters go + * with the trail (the higher addressed portion). This makes 'extent' the lead, + * and returns the trail (except in case of error). + */ +static extent_t * +extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained) { + assert(extent_size_get(extent) == size_a + size_b); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->split == NULL) { + return NULL; + } + + extent_t *trail = extent_alloc(tsdn, arena); + if (trail == NULL) { + goto label_error_a; + } + + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent), + EXTENT_NOT_HEAD); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; + { + extent_t lead; + + extent_init(&lead, arena, extent_addr_get(extent), size_a, + slab_a, szind_a, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent), + EXTENT_NOT_HEAD); + + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, + true, &lead_elm_a, &lead_elm_b); + } + rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, + &trail_elm_a, &trail_elm_b); + + if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL + || trail_elm_b == NULL) { + goto label_error_b; + } + + extent_lock2(tsdn, extent, trail); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), + size_a + size_b, size_a, size_b, extent_committed_get(extent), + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + if (err) { + goto label_error_c; + } + + extent_size_set(extent, size_a); + extent_szind_set(extent, szind_a); + + extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, + szind_a, slab_a); + extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, + szind_b, slab_b); + + extent_unlock2(tsdn, extent, trail); + + return trail; +label_error_c: + extent_unlock2(tsdn, extent, trail); +label_error_b: + extent_dalloc(tsdn, arena, trail); +label_error_a: + return NULL; +} + +extent_t * +extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { + return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, + szind_a, slab_a, size_b, szind_b, slab_b, false); +} + +static bool +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce && !opt_retain) { + return true; + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} + +/* + * Returns true if the given extents can't be merged because of their head bit + * settings. Assumes the second extent has the higher address. + */ +static bool +extent_head_no_merge(extent_t *a, extent_t *b) { + assert(extent_base_get(a) < extent_base_get(b)); + /* + * When coalesce is not always allowed (Windows), only merge extents + * from the same VirtualAlloc region under opt.retain (in which case + * MEM_DECOMMIT is utilized for purging). + */ + if (maps_coalesce) { + return false; + } + if (!opt_retain) { + return true; + } + /* If b is a head extent, disallow the cross-region merge. */ + if (extent_is_head_get(b)) { + /* + * Additionally, sn should not overflow with retain; sanity + * check that different regions have unique sn. 
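Each region grown under retain is assigned a fresh sn and splits inherit it, so head extents from different regions are expected to compare unequal here.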
+ */ + assert(extent_sn_comp(a, b) != 0); + return true; + } + assert(extent_sn_comp(a, b) == 0); + + return false; +} + +static bool +extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + if (!maps_coalesce) { + tsdn_t *tsdn = tsdn_fetch(); + extent_t *a = iealloc(tsdn, addr_a); + extent_t *b = iealloc(tsdn, addr_b); + if (extent_head_no_merge(a, b)) { + return true; + } + } + return extent_merge_default_impl(addr_a, addr_b); +} + +static bool +extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(extent_base_get(a) < extent_base_get(b)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) { + return true; + } + + bool err; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_merge_default_impl(extent_base_get(a), + extent_base_get(b)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = (*r_extent_hooks)->merge(*r_extent_hooks, + extent_base_get(a), extent_size_get(a), extent_base_get(b), + extent_size_get(b), extent_committed_get(a), + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + if (err) { + return true; + } + + /* + * The rtree writes must happen while all the relevant elements are + * owned, so the following code uses decomposed helper functions rather + * than extent_{,de}register() to do things in the right order. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, + &a_elm_b); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, + &b_elm_b); + + extent_lock2(tsdn, a, b); + + if (a_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, + SC_NSIZES, false); + } + if (b_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, + SC_NSIZES, false); + } else { + b_elm_b = b_elm_a; + } + + extent_size_set(a, extent_size_get(a) + extent_size_get(b)); + extent_szind_set(a, SC_NSIZES); + extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? 
+ extent_sn_get(a) : extent_sn_get(b)); + extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + + extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES, + false); + + extent_unlock2(tsdn, a, b); + + extent_dalloc(tsdn, extent_arena_get(b), b); + + return false; +} + +bool +extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { + return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +} + +bool +extent_boot(void) { + if (rtree_new(&extents_rtree, true)) { + return true; + } + + if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", + WITNESS_RANK_EXTENT_POOL)) { + return true; + } + + if (have_dss) { + extent_dss_boot(); + } + + return false; +} + +void +extent_util_stats_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL); + + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(extent == NULL)) { + *nfree = *nregs = *size = 0; + return; + } + + *size = extent_size_get(extent); + if (!extent_slab_get(extent)) { + *nfree = 0; + *nregs = 1; + } else { + *nfree = extent_nfree_get(extent); + *nregs = bin_infos[extent_szind_get(extent)].nregs; + assert(*nfree <= *nregs); + assert(*nfree * extent_usize_get(extent) <= *size); + } +} + +void +extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr, + size_t *nfree, size_t *nregs, size_t *size, + size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) { + assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL + && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL); + + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(extent == NULL)) { + *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0; + *slabcur_addr = NULL; + return; + } + + *size = extent_size_get(extent); + if (!extent_slab_get(extent)) { + *nfree = *bin_nfree = *bin_nregs = 0; + *nregs = 1; + *slabcur_addr = NULL; + return; + } + + *nfree = extent_nfree_get(extent); + const szind_t szind = extent_szind_get(extent); + *nregs = bin_infos[szind].nregs; + assert(*nfree <= *nregs); + assert(*nfree * extent_usize_get(extent) <= *size); + + const arena_t *arena = extent_arena_get(extent); + assert(arena != NULL); + const unsigned binshard = extent_binshard_get(extent); + bin_t *bin = &arena->bins[szind].bin_shards[binshard]; + + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + *bin_nregs = *nregs * bin->stats.curslabs; + assert(*bin_nregs >= bin->stats.curregs); + *bin_nfree = *bin_nregs - bin->stats.curregs; + } else { + *bin_nfree = *bin_nregs = 0; + } + *slabcur_addr = extent_addr_get(bin->slabcur); + assert(*slabcur_addr != NULL); + malloc_mutex_unlock(tsdn, &bin->lock); +} diff --git a/deps/jemalloc/src/extent_dss.c b/deps/jemalloc/src/extent_dss.c new file mode 100644 index 0000000..8581789 --- /dev/null +++ b/deps/jemalloc/src/extent_dss.c @@ -0,0 +1,271 @@ +#define JEMALLOC_EXTENT_DSS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/spin.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* + * Current dss precedence default, used when creating new arenas. 
NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. + */ +static atomic_u_t dss_prec_default = ATOMIC_INIT( + (unsigned)DSS_PREC_DEFAULT); + +/* Base address of the DSS. */ +static void *dss_base; +/* Atomic boolean indicating whether a thread is currently extending DSS. */ +static atomic_b_t dss_extending; +/* Atomic boolean indicating whether the DSS is exhausted. */ +static atomic_b_t dss_exhausted; +/* Atomic current upper limit on DSS addresses. */ +static atomic_p_t dss_max; + +/******************************************************************************/ + +static void * +extent_dss_sbrk(intptr_t increment) { +#ifdef JEMALLOC_DSS + return sbrk(increment); +#else + not_implemented(); + return NULL; +#endif +} + +dss_prec_t +extent_dss_prec_get(void) { + dss_prec_t ret; + + if (!have_dss) { + return dss_prec_disabled; + } + ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); + return ret; +} + +bool +extent_dss_prec_set(dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +static void +extent_dss_extending_start(void) { + spin_t spinner = SPIN_INITIALIZER; + while (true) { + bool expected = false; + if (atomic_compare_exchange_weak_b(&dss_extending, &expected, + true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { + break; + } + spin_adaptive(&spinner); + } +} + +static void +extent_dss_extending_finish(void) { + assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); + + atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); +} + +static void * +extent_dss_max_update(void *new_addr) { + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + void *max_cur = extent_dss_sbrk(0); + if (max_cur == (void *)-1) { + return NULL; + } + atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) { + return NULL; + } + return max_cur; +} + +void * +extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { + extent_t *gap; + + cassert(have_dss); + assert(size > 0); + assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a large allocation request as a negative increment. + */ + if ((intptr_t)size < 0) { + return NULL; + } + + gap = extent_alloc(tsdn, arena); + if (gap == NULL) { + return NULL; + } + + extent_dss_extending_start(); + if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + while (true) { + void *max_cur = extent_dss_max_update(new_addr); + if (max_cur == NULL) { + goto label_oom; + } + + /* + * Compute how much page-aligned gap space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. 
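For example, with 4 KiB pages, if max_cur were 0x10800 and alignment 64 KiB, then gap_addr_page would be 0x11000, ret 0x20000, and the recyclable gap 0xf000 bytes.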
+ */ + void *gap_addr_page = (void *)(PAGE_CEILING( + (uintptr_t)max_cur)); + void *ret = (void *)ALIGNMENT_CEILING( + (uintptr_t)gap_addr_page, alignment); + size_t gap_size_page = (uintptr_t)ret - + (uintptr_t)gap_addr_page; + if (gap_size_page != 0) { + extent_init(gap, arena, gap_addr_page, + gap_size_page, false, SC_NSIZES, + arena_extent_sn_next(arena), + extent_state_active, false, true, true, + EXTENT_NOT_HEAD); + } + /* + * Compute the address just past the end of the desired + * allocation space. + */ + void *dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)max_cur || + (uintptr_t)dss_next < (uintptr_t)max_cur) { + goto label_oom; /* Wrap-around. */ + } + /* Compute the increment, including subpage bytes. */ + void *gap_addr_subpage = max_cur; + size_t gap_size_subpage = (uintptr_t)ret - + (uintptr_t)gap_addr_subpage; + intptr_t incr = gap_size_subpage + size; + + assert((uintptr_t)max_cur + incr == (uintptr_t)ret + + size); + + /* Try to allocate. */ + void *dss_prev = extent_dss_sbrk(incr); + if (dss_prev == max_cur) { + /* Success. */ + atomic_store_p(&dss_max, dss_next, + ATOMIC_RELEASE); + extent_dss_extending_finish(); + + if (gap_size_page != 0) { + extent_dalloc_gap(tsdn, arena, gap); + } else { + extent_dalloc(tsdn, arena, gap); + } + if (!*commit) { + *commit = pages_decommit(ret, size); + } + if (*zero && *commit) { + extent_hooks_t *extent_hooks = + EXTENT_HOOKS_INITIALIZER; + extent_t extent; + + extent_init(&extent, arena, ret, size, + size, false, SC_NSIZES, + extent_state_active, false, true, + true, EXTENT_NOT_HEAD); + if (extent_purge_forced_wrapper(tsdn, + arena, &extent_hooks, &extent, 0, + size)) { + memset(ret, 0, size); + } + } + return ret; + } + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. + */ + if (dss_prev == (void *)-1) { + /* OOM. 
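sbrk() reported failure ((void *)-1), so mark the DSS exhausted; subsequent calls check this flag and fail fast instead of retrying.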
*/ + atomic_store_b(&dss_exhausted, true, + ATOMIC_RELEASE); + goto label_oom; + } + } + } +label_oom: + extent_dss_extending_finish(); + extent_dalloc(tsdn, arena, gap); + return NULL; +} + +static bool +extent_in_dss_helper(void *addr, void *max) { + return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < + (uintptr_t)max); +} + +bool +extent_in_dss(void *addr) { + cassert(have_dss); + + return extent_in_dss_helper(addr, atomic_load_p(&dss_max, + ATOMIC_ACQUIRE)); +} + +bool +extent_dss_mergeable(void *addr_a, void *addr_b) { + void *max; + + cassert(have_dss); + + if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < + (uintptr_t)dss_base) { + return true; + } + + max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); + return (extent_in_dss_helper(addr_a, max) == + extent_in_dss_helper(addr_b, max)); +} + +void +extent_dss_boot(void) { + cassert(have_dss); + + dss_base = extent_dss_sbrk(0); + atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); + atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); + atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); +} + +/******************************************************************************/ diff --git a/deps/jemalloc/src/extent_mmap.c b/deps/jemalloc/src/extent_mmap.c new file mode 100644 index 0000000..17fd1c8 --- /dev/null +++ b/deps/jemalloc/src/extent_mmap.c @@ -0,0 +1,42 @@ +#define JEMALLOC_EXTENT_MMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" + +/******************************************************************************/ +/* Data. */ + +bool opt_retain = +#ifdef JEMALLOC_RETAIN + true +#else + false +#endif + ; + +/******************************************************************************/ + +void * +extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) { + assert(alignment == ALIGNMENT_CEILING(alignment, PAGE)); + void *ret = pages_map(new_addr, size, alignment, commit); + if (ret == NULL) { + return NULL; + } + assert(ret != NULL); + if (*commit) { + *zero = true; + } + return ret; +} + +bool +extent_dalloc_mmap(void *addr, size_t size) { + if (!opt_retain) { + pages_unmap(addr, size); + } + return opt_retain; +} diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c new file mode 100644 index 0000000..7b2bdc2 --- /dev/null +++ b/deps/jemalloc/src/hash.c @@ -0,0 +1,3 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/hook.c b/deps/jemalloc/src/hook.c new file mode 100644 index 0000000..9ac703c --- /dev/null +++ b/deps/jemalloc/src/hook.c @@ -0,0 +1,195 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/hook.h" + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/seq.h" + +typedef struct hooks_internal_s hooks_internal_t; +struct hooks_internal_s { + hooks_t hooks; + bool in_use; +}; + +seq_define(hooks_internal_t, hooks) + +static atomic_u_t nhooks = ATOMIC_INIT(0); +static seq_hooks_t hooks[HOOK_MAX]; +static malloc_mutex_t hooks_mu; + +bool +hook_boot() { + return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK, + malloc_mutex_rank_exclusive); +} + +static void * +hook_install_locked(hooks_t *to_install) { + hooks_internal_t hooks_internal; + for (int i = 0; i < HOOK_MAX; 
i++) { + bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]); + /* We hold mu; no concurrent access. */ + assert(success); + if (!hooks_internal.in_use) { + hooks_internal.hooks = *to_install; + hooks_internal.in_use = true; + seq_store_hooks(&hooks[i], &hooks_internal); + atomic_store_u(&nhooks, + atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1, + ATOMIC_RELAXED); + return &hooks[i]; + } + } + return NULL; +} + +void * +hook_install(tsdn_t *tsdn, hooks_t *to_install) { + malloc_mutex_lock(tsdn, &hooks_mu); + void *ret = hook_install_locked(to_install); + if (ret != NULL) { + tsd_global_slow_inc(tsdn); + } + malloc_mutex_unlock(tsdn, &hooks_mu); + return ret; +} + +static void +hook_remove_locked(seq_hooks_t *to_remove) { + hooks_internal_t hooks_internal; + bool success = seq_try_load_hooks(&hooks_internal, to_remove); + /* We hold mu; no concurrent access. */ + assert(success); + /* Should only remove hooks that were added. */ + assert(hooks_internal.in_use); + hooks_internal.in_use = false; + seq_store_hooks(to_remove, &hooks_internal); + atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1, + ATOMIC_RELAXED); +} + +void +hook_remove(tsdn_t *tsdn, void *opaque) { + if (config_debug) { + char *hooks_begin = (char *)&hooks[0]; + char *hooks_end = (char *)&hooks[HOOK_MAX]; + char *hook = (char *)opaque; + assert(hooks_begin <= hook && hook < hooks_end + && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0); + } + malloc_mutex_lock(tsdn, &hooks_mu); + hook_remove_locked((seq_hooks_t *)opaque); + tsd_global_slow_dec(tsdn); + malloc_mutex_unlock(tsdn, &hooks_mu); +} + +#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr) \ +for (int for_each_hook_counter = 0; \ + for_each_hook_counter < HOOK_MAX; \ + for_each_hook_counter++) { \ + bool for_each_hook_success = seq_try_load_hooks( \ + (hooks_internal_ptr), &hooks[for_each_hook_counter]); \ + if (!for_each_hook_success) { \ + continue; \ + } \ + if (!(hooks_internal_ptr)->in_use) { \ + continue; \ + } +#define FOR_EACH_HOOK_END \ +} + +static bool * +hook_reentrantp() { + /* + * We prevent user reentrancy within hooks. This is basically just a + * thread-local bool that triggers an early-exit. + * + * We don't fold in_hook into reentrancy. There are two reasons for + * this: + * - Right now, we turn on reentrancy during things like extent hook + * execution. Allocating during extent hooks is not officially + * supported, but we don't want to break it for the time being. These + * sorts of allocations should probably still be hooked, though. + * - If a hook allocates, we may want it to be relatively fast (after + * all, it executes on every allocator operation). Turning on + * reentrancy is a fairly heavyweight mode (disabling tcache, + * redirecting to arena 0, etc.). It's possible we may one day want + * to turn on reentrant mode here, if it proves too difficult to keep + * this working. But that's fairly easy for us to see; OTOH, people + * not using hooks because they're too slow is easy for us to miss. + * + * The tricky part is + * that this code might get invoked even if we don't have access to tsd. + * This function mimics getting a pointer to thread-local data, except + * that it might secretly return a pointer to some global data if we + * know that the caller will take the early-exit path. + * If we return a bool that indicates that we are reentrant, then the + * caller will go down the early exit path, leaving the global + * untouched. 
+ */ + static bool in_hook_global = true; + tsdn_t *tsdn = tsdn_fetch(); + tcache_t *tcache = tsdn_tcachep_get(tsdn); + if (tcache != NULL) { + return &tcache->in_hook; + } + return &in_hook_global; +} + +#define HOOK_PROLOGUE \ + if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \ + return; \ + } \ + bool *in_hook = hook_reentrantp(); \ + if (*in_hook) { \ + return; \ + } \ + *in_hook = true; + +#define HOOK_EPILOGUE \ + *in_hook = false; + +void +hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw, + uintptr_t args_raw[3]) { + HOOK_PROLOGUE + + hooks_internal_t hook; + FOR_EACH_HOOK_BEGIN(&hook) + hook_alloc h = hook.hooks.alloc_hook; + if (h != NULL) { + h(hook.hooks.extra, type, result, result_raw, args_raw); + } + FOR_EACH_HOOK_END + + HOOK_EPILOGUE +} + +void +hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) { + HOOK_PROLOGUE + hooks_internal_t hook; + FOR_EACH_HOOK_BEGIN(&hook) + hook_dalloc h = hook.hooks.dalloc_hook; + if (h != NULL) { + h(hook.hooks.extra, type, address, args_raw); + } + FOR_EACH_HOOK_END + HOOK_EPILOGUE +} + +void +hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize, + size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) { + HOOK_PROLOGUE + hooks_internal_t hook; + FOR_EACH_HOOK_BEGIN(&hook) + hook_expand h = hook.hooks.expand_hook; + if (h != NULL) { + h(hook.hooks.extra, type, address, old_usize, new_usize, + result_raw, args_raw); + } + FOR_EACH_HOOK_END + HOOK_EPILOGUE +} diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c new file mode 100644 index 0000000..97da1ee --- /dev/null +++ b/deps/jemalloc/src/jemalloc.c @@ -0,0 +1,3931 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/hook.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/log.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/sc.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Data. */ + +/* Runtime configuration options. */ +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; +bool opt_abort = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +bool opt_abort_conf = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +/* Intentionally default off, even with debug builds. */ +bool opt_confirm_conf = false; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; + +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_zero = false; +unsigned opt_narenas = 0; + +unsigned ncpus; + +/* Protects arenas initialization. 
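Taken by arena_init() and arena_choose_hard() when creating new arenas.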
*/ +malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. + * + * Points to an arena_t. + */ +JEMALLOC_ALIGNED(CACHELINE) +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ +/* Below three are read-only after initialization. */ +static arena_t *a0; /* arenas[0]. */ +unsigned narenas_auto; +unsigned manual_arena_base; + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +/* False should be the common case. Set to true to trigger initialization. */ +bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) +}; +static uint8_t malloc_slow_flags; + +#ifdef JEMALLOC_THREADED_INIT +/* Used to let the initializing thread recursively allocate. */ +# define NO_INITIALIZER ((unsigned long)0) +# define INITIALIZER pthread_self() +# define IS_INITIALIZER (malloc_initializer == pthread_self()) +static pthread_t malloc_initializer = NO_INITIALIZER; +#else +# define NO_INITIALIZER false +# define INITIALIZER true +# define IS_INITIALIZER malloc_initializer +static bool malloc_initializer = NO_INITIALIZER; +#endif + +/* Used to avoid initialization races. */ +#ifdef _WIN32 +#if _WIN32_WINNT >= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else +static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; + +JEMALLOC_ATTR(constructor) +static void WINAPI +_init_init_lock(void) { + /* + * If another constructor in the same binary is using mallctl to e.g. + * set up extent hooks, it may end up running before this one, and + * malloc_init_hard will crash trying to lock the uninitialized lock. So + * we force an initialization of the lock in malloc_init_hard as well. + * We don't try to care about atomicity of the accessed to the + * init_lock_initialized boolean, since it really only matters early in + * the process creation, before any separate thread normally starts + * doing anything. + */ + if (!init_lock_initialized) { + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, + malloc_mutex_rank_exclusive); + } + init_lock_initialized = true; +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif +#endif +#else +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. 
*/ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ + if (unlikely(opt_utrace)) { \ + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + +/* Whether encountered any invalid config options. */ +static bool had_conf_error = false; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool malloc_init_hard_a0(void); +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + +bool +malloc_initialized(void) { + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { + return malloc_init_hard_a0(); + } + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { + return true; + } + return false; +} + +/* + * The a0*() functions are used instead of i{d,}alloc() in situations that + * cannot tolerate TLS variable access. + */ + +static void * +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { + return NULL; + } + + return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); +} + +static void +a0idalloc(void *ptr, bool is_internal) { + idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); +} + +void * +a0malloc(size_t size) { + return a0ialloc(size, false, true); +} + +void +a0dalloc(void *ptr) { + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization). + */ + +void * +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { + size = 1; + } + + return a0ialloc(size, false, false); +} + +void * +bootstrap_calloc(size_t num, size_t size) { + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return a0ialloc(num_size, true, false); +} + +void +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { + return; + } + + a0idalloc(ptr, false); +} + +void +arena_set(unsigned ind, arena_t *arena) { + atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); +} + +static void +narenas_total_set(unsigned narenas) { + atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); +} + +static void +narenas_total_inc(void) { + atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); +} + +unsigned +narenas_total_get(void) { + return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); +} + +/* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + assert(ind <= narenas_total_get()); + if (ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + if (ind == narenas_total_get()) { + narenas_total_inc(); + } + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. 
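In that case return the existing arena as-is rather than initializing a second arena for the same index.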
+ */ + arena = arena_get(tsdn, ind, false); + if (arena != NULL) { + assert(arena_is_auto(arena)); + return arena; + } + + /* Actually initialize the arena. */ + arena = arena_new(tsdn, ind, extent_hooks); + + return arena; +} + +static void +arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { + if (ind == 0) { + return; + } + /* + * Avoid creating a new background thread just for the huge arena, which + * purges eagerly by default. + */ + if (have_background_thread && !arena_is_huge(ind)) { + if (background_thread_create(tsdn_tsd(tsdn), ind)) { + malloc_printf(": error in background thread " + "creation for arena %u. Abort.\n", ind); + abort(); + } + } +} + +arena_t * +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind, extent_hooks); + malloc_mutex_unlock(tsdn, &arenas_lock); + + arena_new_create_background_thread(tsdn, ind); + + return arena; +} + +static void +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, arena); + } else { + tsd_arena_set(tsd, arena); + unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1, + ATOMIC_RELAXED); + tsd_binshards_t *bins = tsd_binshardsp_get(tsd); + for (unsigned i = 0; i < SC_NBINS; i++) { + assert(bin_infos[i].n_shards > 0 && + bin_infos[i].n_shards <= BIN_SHARDS_MAX); + bins->binshard[i] = shard % bin_infos[i].n_shards; + } + } +} + +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { + arena_t *oldarena, *newarena; + + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); + tsd_arena_set(tsd, newarena); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, NULL); + } else { + tsd_arena_set(tsd, NULL); + } +} + +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); + unsigned narenas_actual = narenas_total_get(); + + /* + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. + */ + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? 
narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; + } + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; + } + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } + + /* + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.create mallctl, which we trust mallctl synchronization to + * prevent. + */ + + /* Copy/initialize tickers. */ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. */ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) { + a0dalloc(arenas_tdata_old); + } + return tdata; +} + +/* Slow path, called only by arena_choose(). */ +arena_t * +arena_choose_hard(tsd_t *tsd, bool internal) { + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + unsigned choose = percpu_arena_choose(); + ret = arena_get(tsd_tsdn(tsd), choose, true); + assert(ret != NULL); + arena_bind(tsd, arena_ind_get(ret), false); + arena_bind(tsd, arena_ind_get(ret), true); + + return ret; + } + + if (narenas_auto > 1) { + unsigned i, j, choose[2], first_null; + bool is_new_arena[2]; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) { + choose[j] = 0; + is_new_arena[j] = false; + } + + first_null = narenas_auto; + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); + for (i = 1; i < narenas_auto; i++) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) { + choose[j] = i; + } + } + } else if (first_null == narenas_auto) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. 
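Use the first uninitialized slot recorded during the scan above.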
*/ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j], + (extent_hooks_t *)&extent_hooks_default); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return NULL; + } + is_new_arena[j] = true; + if (!!j == internal) { + ret = arena; + } + } + arena_bind(tsd, choose[j], !!j); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); + + for (j = 0; j < 2; j++) { + if (is_new_arena[j]) { + assert(choose[j] > 0); + arena_new_create_background_thread( + tsd_tsdn(tsd), choose[j]); + } + } + + } else { + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } + + return ret; +} + +void +iarena_cleanup(tsd_t *tsd) { + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) { + arena_unbind(tsd, arena_ind_get(iarena), true); + } +} + +void +arena_cleanup(tsd_t *tsd) { + arena_t *arena; + + arena = tsd_arena_get(tsd); + if (arena != NULL) { + arena_unbind(tsd, arena_ind_get(arena), false); + } +} + +void +arenas_tdata_cleanup(tsd_t *tsd) { + arena_tdata_t *arenas_tdata; + + /* Prevent tsd->arenas_tdata from being (re)created. */ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } +} + +static void +stats_print_atexit(void) { + if (config_stats) { + tsdn_t *tsdn; + unsigned narenas, i; + + tsdn = tsdn_fetch(); + + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. + */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (arena != NULL) { + tcache_t *tcache; + + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tsdn, tcache, arena); + } + malloc_mutex_unlock(tsdn, + &arena->tcache_ql_mtx); + } + } + } + je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); +} + +/* + * Ensure that we don't hold any locks upon entry to or exit from allocator + * code (in a "broad" sense that doesn't count a reentrant allocation as an + * entrance or exit). + */ +JEMALLOC_ALWAYS_INLINE void +check_entry_exit_locking(tsdn_t *tsdn) { + if (!config_debug) { + return; + } + if (tsdn_null(tsdn)) { + return; + } + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * It's possible we hold locks at entry/exit if we're in a nested + * allocation. + */ + int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); + if (reentrancy_level != 0) { + return; + } + witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); +} + +/* + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +static char * +jemalloc_secure_getenv(const char *name) { +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) { + return NULL; + } +# endif + return getenv(name); +#endif +} + +static unsigned +malloc_ncpus(void) { + long result; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). 
glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } +#else + result = sysconf(_SC_NPROCESSORS_ONLN); +#endif + return ((result == -1) ? 1 : (unsigned)result); +} + +static void +init_opt_stats_print_opts(const char *v, size_t vlen) { + size_t opts_len = strlen(opt_stats_print_opts); + assert(opts_len <= stats_print_tot_num_options); + + for (size_t i = 0; i < vlen; i++) { + switch (v[i]) { +#define OPTION(o, v, d, s) case o: break; + STATS_PRINT_OPTIONS +#undef OPTION + default: continue; + } + + if (strchr(opt_stats_print_opts, v[i]) != NULL) { + /* Ignore repeated. */ + continue; + } + + opt_stats_print_opts[opts_len++] = v[i]; + opt_stats_print_opts[opts_len] = '\0'; + assert(opts_len <= stats_print_tot_num_options); + } + assert(opts_len == strlen(opt_stats_print_opts)); +} + +/* Reads the next size pair in a multi-sized option. */ +static bool +malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, + size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) { + const char *cur = *slab_size_segment_cur; + char *end; + uintmax_t um; + + set_errno(0); + + /* First number, then '-' */ + um = malloc_strtoumax(cur, &end, 0); + if (get_errno() != 0 || *end != '-') { + return true; + } + *slab_start = (size_t)um; + cur = end + 1; + + /* Second number, then ':' */ + um = malloc_strtoumax(cur, &end, 0); + if (get_errno() != 0 || *end != ':') { + return true; + } + *slab_end = (size_t)um; + cur = end + 1; + + /* Last number */ + um = malloc_strtoumax(cur, &end, 0); + if (get_errno() != 0) { + return true; + } + *new_size = (size_t)um; + + /* Consume the separator if there is one. */ + if (*end == '|') { + end++; + } + + *vlen_left -= end - *slab_size_segment_cur; + *slab_size_segment_cur = end; + + return false; +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) { + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; !accept;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write(": Conf string ends " + "with key\n"); + } + return true; + default: + malloc_write(": Malformed conf string\n"); + return true; + } + } + + for (accept = false; !accept;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. 
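A small, hypothetical illustration (not part of the patch) of the "start-end:value|..." grammar that malloc_conf_multi_sizes_next() consumes, e.g. for slab_sizes or bin_shards settings; the option string and the output shown in comments are examples only.

#include <inttypes.h>
#include <stdio.h>

int
main(void) {
	const char *conf = "1-4096:1|4097-8192:2";	/* hypothetical slab_sizes value */
	const char *cur = conf;
	while (*cur != '\0') {
		char *end;
		uintmax_t lo  = strtoumax(cur, &end, 0);	/* range start, then '-' */
		uintmax_t hi  = strtoumax(end + 1, &end, 0);	/* range end, then ':' */
		uintmax_t val = strtoumax(end + 1, &end, 0);	/* value for the range */
		printf("%ju-%ju -> %ju\n", lo, hi, val);	/* e.g. "1-4096 -> 1" */
		cur = (*end == '|') ? end + 1 : end;	/* '|' separates segments */
	}
	return 0;
}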
+ */ + if (*opts == '\0') { + malloc_write(": Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return false; +} + +static void +malloc_abort_invalid_conf(void) { + assert(opt_abort_conf); + malloc_printf(": Abort (abort_conf:true) on invalid conf " + "value (see above).\n"); + abort(); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) { + malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); + /* If abort_conf is set, error out after processing all options. */ + const char *experimental = "experimental_"; + if (strncmp(k, experimental, strlen(experimental)) == 0) { + /* However, tolerate experimental features. */ + return; + } + had_conf_error = true; +} + +static void +malloc_slow_flag_init(void) { + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +/* Number of sources for initializing malloc_conf */ +#define MALLOC_CONF_NSOURCES 4 + +static const char * +obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { + if (config_debug) { + static unsigned read_source = 0; + /* + * Each source should only be read once, to minimize # of + * syscalls on init. + */ + assert(read_source++ == which_source); + } + assert(which_source < MALLOC_CONF_NSOURCES); + + const char *ret; + switch (which_source) { + case 0: + ret = config_malloc_conf; + break; + case 1: + if (je_malloc_conf != NULL) { + /* Use options that were compiled into the program. */ + ret = je_malloc_conf; + } else { + /* No configuration specified. */ + ret = NULL; + } + break; + case 2: { + ssize_t linklen = 0; +#ifndef _WIN32 + int saved_errno = errno; + const char *linkname = +# ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +# else + "/etc/malloc.conf" +# endif + ; + + /* + * Try to use the contents of the "/etc/malloc.conf" symbolic + * link's name. + */ +#ifndef JEMALLOC_READLINKAT + linklen = readlink(linkname, buf, PATH_MAX); +#else + linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); +#endif + if (linklen == -1) { + /* No configuration specified. */ + linklen = 0; + /* Restore errno. */ + set_errno(saved_errno); + } +#endif + buf[linklen] = '\0'; + ret = buf; + break; + } case 3: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((ret = jemalloc_secure_getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to the value + * of the MALLOC_CONF environment variable. + */ + } else { + /* No configuration specified. 
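For context, a hypothetical application-side sketch (not part of this file) of the second configuration source read above: a program linked against jemalloc may define the global string malloc_conf, which is picked up during initialization. The option values are examples only, and a prefixed build would use the prefixed symbol name instead.

/* Read by obtain_malloc_conf() as the application's compiled-in default. */
const char *malloc_conf = "narenas:4,abort_conf:true";

int
main(void) {
	return 0;
}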
*/ + ret = NULL; + } + break; + } default: + not_reached(); + ret = NULL; + } + return ret; +} + +static void +malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], + bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], + char buf[PATH_MAX + 1]) { + static const char *opts_explain[MALLOC_CONF_NSOURCES] = { + "string specified via --with-malloc-conf", + "string pointed to by the global variable malloc_conf", + "\"name\" of the file referenced by the symbolic link named " + "/etc/malloc.conf", + "value of the environment variable MALLOC_CONF" + }; + unsigned i; + const char *opts, *k, *v; + size_t klen, vlen; + + for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { + /* Get runtime configuration. */ + if (initial_call) { + opts_cache[i] = obtain_malloc_conf(i, buf); + } + opts = opts_cache[i]; + if (!initial_call && opt_confirm_conf) { + malloc_printf( + ": malloc_conf #%u (%s): \"%s\"\n", + i + 1, opts_explain[i], opts != NULL ? opts : ""); + } + if (opts == NULL) { + continue; + } + + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { + +#define CONF_ERROR(msg, k, klen, v, vlen) \ + if (!initial_call) { \ + malloc_conf_error( \ + msg, k, klen, v, vlen); \ + cur_opt_valid = false; \ + } +#define CONF_CONTINUE { \ + if (!initial_call && opt_confirm_conf \ + && cur_opt_valid) { \ + malloc_printf(": -- " \ + "Set conf value: %.*s:%.*s" \ + "\n", (int)klen, k, \ + (int)vlen, v); \ + } \ + continue; \ + } +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) { \ + o = true; \ + } else if (CONF_MATCH_VALUE("false")) { \ + o = false; \ + } else { \ + CONF_ERROR("Invalid conf value",\ + k, klen, v, vlen); \ + } \ + CONF_CONTINUE; \ + } + /* + * One of the CONF_MIN macros below expands, in one of the use points, + * to "unsigned integer < 0", which is always false, triggering the + * GCC -Wtype-limits warning, which we disable here and re-enable below. 
+ */ + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS + +#define CONF_DONT_CHECK_MIN(um, min) false +#define CONF_CHECK_MIN(um, min) ((um) < (min)) +#define CONF_DONT_CHECK_MAX(um, max) false +#define CONF_CHECK_MAX(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + if (CONF_MATCH(n)) { \ + uintmax_t um; \ + char *end; \ + \ + set_errno(0); \ + um = malloc_strtoumax(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + CONF_ERROR("Invalid conf value",\ + k, klen, v, vlen); \ + } else if (clip) { \ + if (check_min(um, (t)(min))) { \ + o = (t)(min); \ + } else if ( \ + check_max(um, (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)um; \ + } \ + } else { \ + if (check_min(um, (t)(min)) || \ + check_max(um, (t)(max))) { \ + CONF_ERROR( \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ + } else { \ + o = (t)um; \ + } \ + } \ + CONF_CONTINUE; \ + } +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ + long l; \ + char *end; \ + \ + set_errno(0); \ + l = strtol(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + CONF_ERROR("Invalid conf value",\ + k, klen, v, vlen); \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ + CONF_ERROR( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else { \ + o = l; \ + } \ + CONF_CONTINUE; \ + } +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? 
vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + CONF_CONTINUE; \ + } + + bool cur_opt_valid = true; + + CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") + if (initial_call) { + continue; + } + + CONF_HANDLE_BOOL(opt_abort, "abort") + CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + if (strncmp("metadata_thp", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < metadata_thp_mode_limit; i++) { + if (strncmp(metadata_thp_mode_names[i], + v, vlen) == 0) { + opt_metadata_thp = i; + match = true; + break; + } + } + if (!match) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + CONF_HANDLE_BOOL(opt_retain, "retain") + if (strncmp("dss", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < dss_prec_limit; i++) { + if (strncmp(dss_prec_names[i], v, vlen) + == 0) { + if (extent_dss_prec_set(i)) { + CONF_ERROR( + "Error setting dss", + k, klen, v, vlen); + } else { + opt_dss = + dss_prec_names[i]; + match = true; + break; + } + } + } + if (!match) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, + false) + if (CONF_MATCH("bin_shards")) { + const char *bin_shards_segment_cur = v; + size_t vlen_left = vlen; + do { + size_t size_start; + size_t size_end; + size_t nshards; + bool err = malloc_conf_multi_sizes_next( + &bin_shards_segment_cur, &vlen_left, + &size_start, &size_end, &nshards); + if (err || bin_update_shard_size( + bin_shard_sizes, size_start, + size_end, nshards)) { + CONF_ERROR( + "Invalid settings for " + "bin_shards", k, klen, v, + vlen); + break; + } + } while (vlen_left > 0); + CONF_CONTINUE; + } + CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, + "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, + "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + if (CONF_MATCH("stats_print_opts")) { + init_opt_stats_print_opts(v, vlen); + CONF_CONTINUE; + } + if (config_fill) { + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + opt_junk = "true"; + opt_junk_alloc = opt_junk_free = + true; + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } else { + CONF_ERROR( + "Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + CONF_HANDLE_BOOL(opt_zero, "zero") + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, "utrace") + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") + } + CONF_HANDLE_BOOL(opt_tcache, "tcache") + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", + -1, (sizeof(size_t) << 3) - 1) + + /* + * The runtime option of oversize_threshold remains + * undocumented. It may be tweaked in the next major + * release (6.0). The default value 8M is rather + * conservative / safe. Tuning it further down may + * improve fragmentation a bit more, but may also cause + * contention on the huge arena. 
+ */ + CONF_HANDLE_SIZE_T(opt_oversize_threshold, + "oversize_threshold", 0, SC_LARGE_MAXCLASS, + CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false) + CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, + "lg_extent_max_active_fit", 0, + (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN, + CONF_CHECK_MAX, false) + + if (strncmp("percpu_arena", k, klen) == 0) { + bool match = false; + for (int i = percpu_arena_mode_names_base; i < + percpu_arena_mode_names_limit; i++) { + if (strncmp(percpu_arena_mode_names[i], + v, vlen) == 0) { + if (!have_percpu_arena) { + CONF_ERROR( + "No getcpu support", + k, klen, v, vlen); + } + opt_percpu_arena = i; + match = true; + break; + } + } + if (!match) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + CONF_HANDLE_BOOL(opt_background_thread, + "background_thread"); + CONF_HANDLE_SIZE_T(opt_max_background_threads, + "max_background_threads", 1, + opt_max_background_threads, + CONF_CHECK_MIN, CONF_CHECK_MAX, + true); + if (CONF_MATCH("slab_sizes")) { + bool err; + const char *slab_size_segment_cur = v; + size_t vlen_left = vlen; + do { + size_t slab_start; + size_t slab_end; + size_t pgs; + err = malloc_conf_multi_sizes_next( + &slab_size_segment_cur, + &vlen_left, &slab_start, &slab_end, + &pgs); + if (!err) { + sc_data_update_slab_size( + sc_data, slab_start, + slab_end, (int)pgs); + } else { + CONF_ERROR("Invalid settings " + "for slab_sizes", + k, klen, v, vlen); + } + } while (!err && vlen_left > 0); + CONF_CONTINUE; + } + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, "prof") + CONF_HANDLE_CHAR_P(opt_prof_prefix, + "prof_prefix", "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, "prof_active") + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init") + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, + true) + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + "lg_prof_interval", -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") + CONF_HANDLE_BOOL(opt_prof_final, "prof_final") + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") + CONF_HANDLE_BOOL(opt_prof_log, "prof_log") + } + if (config_log) { + if (CONF_MATCH("log")) { + size_t cpylen = ( + vlen <= sizeof(log_var_names) ? 
+ vlen : sizeof(log_var_names) - 1); + strncpy(log_var_names, v, cpylen); + log_var_names[cpylen] = '\0'; + CONF_CONTINUE; + } + } + if (CONF_MATCH("thp")) { + bool match = false; + for (int i = 0; i < thp_mode_names_limit; i++) { + if (strncmp(thp_mode_names[i],v, vlen) + == 0) { + if (!have_madvise_huge) { + CONF_ERROR( + "No THP support", + k, klen, v, vlen); + } + opt_thp = i; + match = true; + break; + } + } + if (!match) { + CONF_ERROR("Invalid conf value", + k, klen, v, vlen); + } + CONF_CONTINUE; + } + CONF_ERROR("Invalid conf pair", k, klen, v, vlen); +#undef CONF_ERROR +#undef CONF_CONTINUE +#undef CONF_MATCH +#undef CONF_MATCH_VALUE +#undef CONF_HANDLE_BOOL +#undef CONF_DONT_CHECK_MIN +#undef CONF_CHECK_MIN +#undef CONF_DONT_CHECK_MAX +#undef CONF_CHECK_MAX +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + /* Re-enable diagnostic "-Wtype-limits" */ + JEMALLOC_DIAGNOSTIC_POP + } + if (opt_abort_conf && had_conf_error) { + malloc_abort_invalid_conf(); + } + } + atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); +} + +static void +malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { + const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; + char buf[PATH_MAX + 1]; + + /* The first call only set the confirm_conf option and opts_cache */ + malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); + malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, + NULL); +} + +#undef MALLOC_CONF_NSOURCES + +static bool +malloc_init_hard_needed(void) { + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + return false; + } +#ifdef JEMALLOC_THREADED_INIT + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + /* Busy-wait until the initializing thread completes. */ + spin_t spinner = SPIN_INITIALIZER; + do { + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); + } while (!malloc_initialized()); + return false; + } +#endif + return true; +} + +static bool +malloc_init_hard_a0_locked() { + malloc_initializer = INITIALIZER; + + JEMALLOC_DIAGNOSTIC_PUSH + JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + sc_data_t sc_data = {0}; + JEMALLOC_DIAGNOSTIC_POP + + /* + * Ordering here is somewhat tricky; we need sc_boot() first, since that + * determines what the size classes will be, and then + * malloc_conf_init(), since any slab size tweaking will need to be done + * before sz_boot and bin_boot, which assume that the values they read + * out of sc_data_global are final. + */ + sc_boot(&sc_data); + unsigned bin_shard_sizes[SC_NBINS]; + bin_shard_sizes_boot(bin_shard_sizes); + /* + * prof_boot0 only initializes opt_prof_prefix. We need to do it before + * we parse malloc_conf options, in case malloc_conf parsing overwrites + * it. + */ + if (config_prof) { + prof_boot0(); + } + malloc_conf_init(&sc_data, bin_shard_sizes); + sz_boot(&sc_data); + bin_boot(&sc_data, bin_shard_sizes); + + if (opt_stats_print) { + /* Print statistics at exit. 
*/ + if (atexit(stats_print_atexit) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) { + abort(); + } + } + } + if (pages_boot()) { + return true; + } + if (base_boot(TSDN_NULL)) { + return true; + } + if (extent_boot()) { + return true; + } + if (ctl_boot()) { + return true; + } + if (config_prof) { + prof_boot1(); + } + arena_boot(&sc_data); + if (tcache_boot(TSDN_NULL)) { + return true; + } + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, + malloc_mutex_rank_exclusive)) { + return true; + } + hook_boot(); + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas_auto = 1; + manual_arena_base = narenas_auto + 1; + memset(arenas, 0, sizeof(arena_t *) * narenas_auto); + /* + * Initialize one arena here. The rest are lazily created in + * arena_choose_hard(). + */ + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { + return true; + } + a0 = arena_get(TSDN_NULL, 0, false); + malloc_init_state = malloc_init_a0_initialized; + + return false; +} + +static bool +malloc_init_hard_a0(void) { + bool ret; + + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return ret; +} + +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) { + malloc_init_state = malloc_init_recursible; + + ncpus = malloc_ncpus(); + +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) { + abort(); + } + return true; + } +#endif + + if (background_thread_boot0()) { + return true; + } + + return false; +} + +static unsigned +malloc_narenas_default(void) { + assert(ncpus > 0); + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) { + return ncpus << 2; + } else { + return 1; + } +} + +static percpu_arena_mode_t +percpu_arena_as_initialized(percpu_arena_mode_t mode) { + assert(!malloc_initialized()); + assert(mode <= percpu_arena_disabled); + + if (mode != percpu_arena_disabled) { + mode += percpu_arena_mode_enabled_base; + } + + return mode; +} + +static bool +malloc_init_narenas(void) { + assert(ncpus > 0); + + if (opt_percpu_arena != percpu_arena_disabled) { + if (!have_percpu_arena || malloc_getcpu() < 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_printf(": perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", opt_narenas ? + opt_narenas : malloc_narenas_default()); + if (opt_abort) { + abort(); + } + } else { + if (ncpus >= MALLOCX_ARENA_LIMIT) { + malloc_printf(": narenas w/ percpu" + "arena beyond limit (%d)\n", ncpus); + if (opt_abort) { + abort(); + } + return true; + } + /* NB: opt_percpu_arena isn't fully initialized yet. 
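A tiny standalone illustration (not from the patch) of the default arena count computed by malloc_narenas_default() above: four arenas per CPU on SMP systems, one otherwise; the later cap against MALLOCX_ARENA_LIMIT is not shown here.

#include <stdio.h>

static unsigned
narenas_default(unsigned ncpus) {
	/* SMP: over-provision arenas to reduce lock contention. */
	return (ncpus > 1) ? ncpus << 2 : 1;
}

int
main(void) {
	printf("%u\n", narenas_default(8));	/* 32 */
	printf("%u\n", narenas_default(1));	/* 1 */
	return 0;
}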
*/ + if (percpu_arena_as_initialized(opt_percpu_arena) == + per_phycpu_arena && ncpus % 2 != 0) { + malloc_printf(": invalid " + "configuration -- per physical CPU arena " + "with odd number (%u) of CPUs (no hyper " + "threading?).\n", ncpus); + if (opt_abort) + abort(); + } + unsigned n = percpu_arena_ind_limit( + percpu_arena_as_initialized(opt_percpu_arena)); + if (opt_narenas < n) { + /* + * If narenas is specified with percpu_arena + * enabled, actual narenas is set as the greater + * of the two. percpu_arena_choose will be free + * to use any of the arenas based on CPU + * id. This is conservative (at a small cost) + * but ensures correctness. + * + * If for some reason the ncpus determined at + * boot is not the actual number (e.g. because + * of affinity setting from numactl), reserving + * narenas this way provides a workaround for + * percpu_arena. + */ + opt_narenas = n; + } + } + } + if (opt_narenas == 0) { + opt_narenas = malloc_narenas_default(); + } + assert(opt_narenas > 0); + + narenas_auto = opt_narenas; + /* + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). + */ + if (narenas_auto >= MALLOCX_ARENA_LIMIT) { + narenas_auto = MALLOCX_ARENA_LIMIT - 1; + malloc_printf(": Reducing narenas to limit (%d)\n", + narenas_auto); + } + narenas_total_set(narenas_auto); + if (arena_init_huge()) { + narenas_total_inc(); + } + manual_arena_base = narenas_total_get(); + + return false; +} + +static void +malloc_init_percpu(void) { + opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); +} + +static bool +malloc_init_hard_finish(void) { + if (malloc_mutex_boot()) { + return true; + } + + malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + + return false; +} + +static void +malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { + malloc_mutex_assert_owner(tsdn, &init_lock); + malloc_mutex_unlock(tsdn, &init_lock); + if (reentrancy_set) { + assert(!tsdn_null(tsdn)); + tsd_t *tsd = tsdn_tsd(tsdn); + assert(tsd_reentrancy_level_get(tsd) > 0); + post_reentrancy(tsd); + } +} + +static bool +malloc_init_hard(void) { + tsd_t *tsd; + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(TSDN_NULL, &init_lock); + +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ + return ret; + + if (!malloc_init_hard_needed()) { + UNLOCK_RETURN(TSDN_NULL, false, false) + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + UNLOCK_RETURN(TSDN_NULL, true, false) + } + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) { + return true; + } + if (malloc_init_hard_recursible()) { + return true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + /* Set reentrancy level to 1 during init. */ + pre_reentrancy(tsd, NULL); + /* Initialize narenas before prof_boot2 (for allocation). */ + if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + if (config_prof && prof_boot2(tsd)) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + + malloc_init_percpu(); + + if (malloc_init_hard_finish()) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + post_reentrancy(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + + witness_assert_lockless(witness_tsd_tsdn( + tsd_witness_tsdp_get_unsafe(tsd))); + malloc_tsd_boot1(); + /* Update TSD after tsd_boot1. 
*/ + tsd = tsd_fetch(); + if (opt_background_thread) { + assert(have_background_thread); + /* + * Need to finish init & unlock first before creating background + * threads (pthread_create depends on malloc). ctl_init (which + * sets isthreaded) needs to be called without holding any lock. + */ + background_thread_ctl_init(tsd_tsdn(tsd)); + if (background_thread_create(tsd, 0)) { + return true; + } + } +#undef UNLOCK_RETURN + return false; +} + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin allocation-path internal functions and data structures. + */ + +/* + * Settings determined by the documented behavior of the allocation functions. + */ +typedef struct static_opts_s static_opts_t; +struct static_opts_s { + /* Whether or not allocation size may overflow. */ + bool may_overflow; + + /* + * Whether or not allocations (with alignment) of size 0 should be + * treated as size 1. + */ + bool bump_empty_aligned_alloc; + /* + * Whether to assert that allocations are not of size 0 (after any + * bumping). + */ + bool assert_nonempty_alloc; + + /* + * Whether or not to modify the 'result' argument to malloc in case of + * error. + */ + bool null_out_result_on_error; + /* Whether to set errno when we encounter an error condition. */ + bool set_errno_on_error; + + /* + * The minimum valid alignment for functions requesting aligned storage. + */ + size_t min_alignment; + + /* The error string to use if we oom. */ + const char *oom_string; + /* The error string to use if the passed-in alignment is invalid. */ + const char *invalid_alignment_string; + + /* + * False if we're configured to skip some time-consuming operations. + * + * This isn't really a malloc "behavior", but it acts as a useful + * summary of several other static (or at least, static after program + * initialization) options. + */ + bool slow; + /* + * Return size. + */ + bool usize; +}; + +JEMALLOC_ALWAYS_INLINE void +static_opts_init(static_opts_t *static_opts) { + static_opts->may_overflow = false; + static_opts->bump_empty_aligned_alloc = false; + static_opts->assert_nonempty_alloc = false; + static_opts->null_out_result_on_error = false; + static_opts->set_errno_on_error = false; + static_opts->min_alignment = 0; + static_opts->oom_string = ""; + static_opts->invalid_alignment_string = ""; + static_opts->slow = false; + static_opts->usize = false; +} + +/* + * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we + * should have one constant here per magic value there. Note however that the + * representations need not be related. + */ +#define TCACHE_IND_NONE ((unsigned)-1) +#define TCACHE_IND_AUTOMATIC ((unsigned)-2) +#define ARENA_IND_AUTOMATIC ((unsigned)-1) + +typedef struct dynamic_opts_s dynamic_opts_t; +struct dynamic_opts_s { + void **result; + size_t usize; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; + unsigned tcache_ind; + unsigned arena_ind; +}; + +JEMALLOC_ALWAYS_INLINE void +dynamic_opts_init(dynamic_opts_t *dynamic_opts) { + dynamic_opts->result = NULL; + dynamic_opts->usize = 0; + dynamic_opts->num_items = 0; + dynamic_opts->item_size = 0; + dynamic_opts->alignment = 0; + dynamic_opts->zero = false; + dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; + dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; +} + +/* ind is ignored if dopts->alignment > 0. 
*/ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + tcache_t *tcache; + arena_t *arena; + + /* Fill in the tcache. */ + if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!sopts->slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + tcache = tcache_get(tsd); + } + } else if (dopts->tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, dopts->tcache_ind); + } + + /* Fill in the arena. */ + if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { + /* + * In case of automatic arena management, we defer arena + * computation until as late as we can, hoping to fill the + * allocation out of the tcache. + */ + arena = NULL; + } else { + arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + } + + if (unlikely(dopts->alignment != 0)) { + return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, + dopts->zero, tcache, arena); + } + + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, + arena, sopts->slow); +} + +JEMALLOC_ALWAYS_INLINE void * +imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t usize, szind_t ind) { + void *ret; + + /* + * For small allocations, sampling bumps the usize. If so, we allocate + * from the ind_large bucket. + */ + szind_t ind_large; + size_t bumped_usize = usize; + + if (usize <= SC_SMALL_MAXCLASS) { + assert(((dopts->alignment == 0) ? + sz_s2u(SC_LARGE_MINCLASS) : + sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment)) + == SC_LARGE_MINCLASS); + ind_large = sz_size2index(SC_LARGE_MINCLASS); + bumped_usize = sz_s2u(SC_LARGE_MINCLASS); + ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, + bumped_usize, ind_large); + if (unlikely(ret == NULL)) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), ret, usize); + } else { + ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); + } + + return ret; +} + +/* + * Returns true if the allocation will overflow, and false otherwise. Sets + * *size to the product either way. + */ +JEMALLOC_ALWAYS_INLINE bool +compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, + size_t *size) { + /* + * This function is just num_items * item_size, except that we may have + * to check for overflow. + */ + + if (!may_overflow) { + assert(dopts->num_items == 1); + *size = dopts->item_size; + return false; + } + + /* A size_t with its high-half bits all set to 1. */ + static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); + + *size = dopts->item_size * dopts->num_items; + + if (unlikely(*size == 0)) { + return (dopts->num_items != 0 && dopts->item_size != 0); + } + + /* + * We got a non-zero size, but we don't know if we overflowed to get + * there. To avoid having to do a divide, we'll be clever and note that + * if both A and B can be represented in N/2 bits, then their product + * can be represented in N bits (without the possibility of overflow). + */ + if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { + return false; + } + if (likely(*size / dopts->item_size == dopts->num_items)) { + return false; + } + return true; +} + +JEMALLOC_ALWAYS_INLINE int +imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { + /* Where the actual allocated memory will live. */ + void *allocation = NULL; + /* Filled in by compute_size_with_overflow below. 
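A minimal sketch (illustrative only; names are hypothetical) of the overflow test used by compute_size_with_overflow() above: if neither operand has a bit set in the upper half of size_t, the product cannot overflow, so the division-based check is only needed in the rare remaining case.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t a, size_t b, size_t *prod) {
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*prod = a * b;
	if ((high_bits & (a | b)) == 0) {
		return false;	/* Both fit in N/2 bits: product fits in N bits. */
	}
	return b != 0 && *prod / b != a;	/* Fall back to a division check. */
}

int
main(void) {
	size_t p;
	assert(!mul_overflows(1000, 1000, &p) && p == 1000000);
	assert(mul_overflows(SIZE_MAX, 2, &p));
	return 0;
}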
*/ + size_t size = 0; + /* + * For unaligned allocations, we need only ind. For aligned + * allocations, or in case of stats or profiling we need usize. + * + * These are actually dead stores, in that their values are reset before + * any branch on their value is taken. Sometimes though, it's + * convenient to pass them as arguments before this point. To avoid + * undefined behavior then, we initialize them with dummy stores. + */ + szind_t ind = 0; + size_t usize = 0; + + /* Reentrancy is only checked on slow path. */ + int8_t reentrancy_level; + + /* Compute the amount of memory the user wants. */ + if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, + &size))) { + goto label_oom; + } + + if (unlikely(dopts->alignment < sopts->min_alignment + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + goto label_invalid_alignment; + } + + /* This is the beginning of the "core" algorithm. */ + + if (dopts->alignment == 0) { + ind = sz_size2index(size); + if (unlikely(ind >= SC_NSIZES)) { + goto label_oom; + } + if (config_stats || (config_prof && opt_prof) || sopts->usize) { + usize = sz_index2size(ind); + dopts->usize = usize; + assert(usize > 0 && usize + <= SC_LARGE_MAXCLASS); + } + } else { + if (sopts->bump_empty_aligned_alloc) { + if (unlikely(size == 0)) { + size = 1; + } + } + usize = sz_sa2u(size, dopts->alignment); + dopts->usize = usize; + if (unlikely(usize == 0 + || usize > SC_LARGE_MAXCLASS)) { + goto label_oom; + } + } + /* Validate the user input. */ + if (sopts->assert_nonempty_alloc) { + assert (size != 0); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + /* + * If we need to handle reentrancy, we can do it out of a + * known-initialized arena (i.e. arena 0). + */ + reentrancy_level = tsd_reentrancy_level_get(tsd); + if (sopts->slow && unlikely(reentrancy_level > 0)) { + /* + * We should never specify particular arenas or tcaches from + * within our internal allocations. + */ + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || + dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); + dopts->tcache_ind = TCACHE_IND_NONE; + /* We know that arena 0 has already been initialized. */ + dopts->arena_ind = 0; + } + + /* If profiling is on, get our profiling context. */ + if (config_prof && opt_prof) { + /* + * Note that if we're going down this path, usize must have been + * initialized in the previous if statement. + */ + prof_tctx_t *tctx = prof_alloc_prep( + tsd, usize, prof_active_get_unlocked(), true); + + alloc_ctx_t alloc_ctx; + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + alloc_ctx.slab = (usize + <= SC_SMALL_MAXCLASS); + allocation = imalloc_no_sample( + sopts, dopts, tsd, usize, usize, ind); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + /* + * Note that ind might still be 0 here. This is fine; + * imalloc_sample ignores ind if dopts->alignment > 0. + */ + allocation = imalloc_sample( + sopts, dopts, tsd, usize, ind); + alloc_ctx.slab = false; + } else { + allocation = NULL; + } + + if (unlikely(allocation == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + goto label_oom; + } + prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + } else { + /* + * If dopts->alignment > 0, then ind is still 0, but usize was + * computed in the previous if statement. Down the positive + * alignment path, imalloc_no_sample ignores ind and size + * (relying only on usize). 
+ */ + allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, + ind); + if (unlikely(allocation == NULL)) { + goto label_oom; + } + } + + /* + * Allocation has been done at this point. We still have some + * post-allocation work to do though. + */ + assert(dopts->alignment == 0 + || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); + + if (config_stats) { + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + + if (sopts->slow) { + UTRACE(0, size, allocation); + } + + /* Success! */ + check_entry_exit_locking(tsd_tsdn(tsd)); + *dopts->result = allocation; + return 0; + +label_oom: + if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->set_errno_on_error) { + set_errno(ENOMEM); + } + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return ENOMEM; + + /* + * This label is only jumped to by one goto; we move it out of line + * anyways to avoid obscuring the non-error paths, and for symmetry with + * the oom case. + */ +label_invalid_alignment: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->invalid_alignment_string); + abort(); + } + + if (sopts->set_errno_on_error) { + set_errno(EINVAL); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return EINVAL; +} + +JEMALLOC_ALWAYS_INLINE bool +imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); + set_errno(ENOMEM); + *dopts->result = NULL; + + return false; + } + + return true; +} + +/* Returns the errno-style error code of the allocation. */ +JEMALLOC_ALWAYS_INLINE int +imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { + return ENOMEM; + } + + /* We always need the tsd. Let's grab it right away. */ + tsd_t *tsd = tsd_fetch(); + assert(tsd); + if (likely(tsd_fast(tsd))) { + /* Fast and common path. */ + tsd_assert_fast(tsd); + sopts->slow = false; + return imalloc_body(sopts, dopts, tsd); + } else { + if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { + return ENOMEM; + } + + sopts->slow = true; + return imalloc_body(sopts, dopts, tsd); + } +} + +JEMALLOC_NOINLINE +void * +malloc_default(size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.malloc.entry", "size: %zu", size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = ": Error in malloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + + imalloc(&sopts, &dopts); + /* + * Note that this branch gets optimized away -- it immediately follows + * the check on tsd_fast that sets sopts.slow. 
+ */ + if (sopts.slow) { + uintptr_t args[3] = {size}; + hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); + } + + LOG("core.malloc.exit", "result: %p", ret); + + return ret; +} + +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ + +/* + * malloc() fastpath. + * + * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit + * tcache. If either of these is false, we tail-call to the slowpath, + * malloc_default(). Tail-calling is used to avoid any caller-saved + * registers. + * + * fastpath supports ticker and profiling, both of which will also + * tail-call to the slowpath if they fire. + */ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) { + LOG("core.malloc.entry", "size: %zu", size); + + if (tsd_get_allocates() && unlikely(!malloc_initialized())) { + return malloc_default(size); + } + + tsd_t *tsd = tsd_get(false); + if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { + return malloc_default(size); + } + + tcache_t *tcache = tsd_tcachep_get(tsd); + + if (unlikely(ticker_trytick(&tcache->gc_ticker))) { + return malloc_default(size); + } + + szind_t ind = sz_size2index_lookup(size); + size_t usize; + if (config_stats || config_prof) { + usize = sz_index2size(ind); + } + /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */ + assert(ind < SC_NBINS); + assert(size <= SC_SMALL_MAXCLASS); + + if (config_prof) { + int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); + bytes_until_sample -= usize; + tsd_bytes_until_sample_set(tsd, bytes_until_sample); + + if (unlikely(bytes_until_sample < 0)) { + /* + * Avoid a prof_active check on the fastpath. + * If prof_active is false, set bytes_until_sample to + * a large value. If prof_active is set to true, + * bytes_until_sample will be reset. 
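A simplified, hypothetical sketch (not the upstream logic verbatim) of the byte-countdown idea used above: every fast-path allocation subtracts its usable size from a per-thread counter, and only a negative counter forces the slow, sampling-aware path.

#include <stdbool.h>
#include <stdint.h>

/*
 * Assumed 1 MiB average sampling interval; jemalloc derives the real value
 * from opt_lg_prof_sample, and keeps the counter in per-thread state.
 */
static int64_t bytes_until_sample = 1 << 20;

static bool
take_slow_path(uint64_t usize) {
	bytes_until_sample -= (int64_t)usize;
	if (bytes_until_sample < 0) {
		bytes_until_sample = 1 << 20;	/* Re-arm the countdown. */
		return true;	/* Defer to the sampling-capable slow path. */
	}
	return false;
}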
+ */ + if (!prof_active) { + tsd_bytes_until_sample_set(tsd, SSIZE_MAX); + } + return malloc_default(size); + } + } + + cache_bin_t *bin = tcache_small_bin_get(tcache, ind); + bool tcache_success; + void* ret = cache_bin_alloc_easy(bin, &tcache_success); + + if (tcache_success) { + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + bin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + + LOG("core.malloc.exit", "result: %p", ret); + + /* Fastpath success */ + return ret; + } + + return malloc_default(size); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +JEMALLOC_ATTR(nonnull(1)) +je_posix_memalign(void **memptr, size_t alignment, size_t size) { + int ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " + "size: %zu", memptr, alignment, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_aligned_alloc = true; + sopts.min_alignment = sizeof(void *); + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + + dopts.result = memptr; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + ret = imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, + (uintptr_t)size}; + hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, + (uintptr_t)ret, args); + } + + LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, + *memptr); + + return ret; +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) +je_aligned_alloc(size_t alignment, size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", + alignment, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_aligned_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.min_alignment = 1; + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size}; + hook_invoke_alloc(hook_alloc_aligned_alloc, ret, + (uintptr_t)ret, args); + } + + LOG("core.aligned_alloc.exit", "result: %p", ret); + + return ret; +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.may_overflow = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = ": Error in calloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size}; + hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args); + } + + LOG("core.calloc.exit", "result: %p", ret); + + 
return ret; +} + +static void * +irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { + void *p; + + if (tctx == NULL) { + return NULL; + } + if (usize <= SC_SMALL_MAXCLASS) { + p = iralloc(tsd, old_ptr, old_usize, + SC_LARGE_MINCLASS, 0, false, hook_args); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), p, usize); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, + hook_args); + } + + return p; +} + +JEMALLOC_ALWAYS_INLINE void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx, + hook_args); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, + hook_args); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return NULL; + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); + + return p; +} + +JEMALLOC_ALWAYS_INLINE void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != SC_NSIZES); + + size_t usize; + if (config_prof && opt_prof) { + usize = sz_index2size(alloc_ctx.szind); + prof_free(tsd, ptr, usize, &alloc_ctx); + } else if (config_stats) { + usize = sz_index2size(alloc_ctx.szind); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + false); + } else { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + true); + } +} + +JEMALLOC_ALWAYS_INLINE void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx, *ctx; + if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { + /* + * When cache_oblivious is disabled and ptr is not page aligned, + * the allocation was not sampled -- usize can be used to + * determine szind directly. 
+ */ + alloc_ctx.szind = sz_size2index(usize); + alloc_ctx.slab = true; + ctx = &alloc_ctx; + if (config_debug) { + alloc_ctx_t dbg_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, + &dbg_ctx.slab); + assert(dbg_ctx.szind == alloc_ctx.szind); + assert(dbg_ctx.slab == alloc_ctx.slab); + } + } else if (config_prof && opt_prof) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind == sz_size2index(usize)); + ctx = &alloc_ctx; + } else { + ctx = NULL; + } + + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, ctx); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); + } else { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); + } +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t arg_size) { + void *ret; + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + size_t old_usize = 0; + size_t size = arg_size; + + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); + + if (unlikely(size == 0)) { + if (ptr != NULL) { + /* realloc(ptr, 0) is equivalent to free(ptr). */ + UTRACE(ptr, 0, 0); + tcache_t *tcache; + tsd_t *tsd = tsd_fetch(); + if (tsd_reentrancy_level_get(tsd) == 0) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + + uintptr_t args[3] = {(uintptr_t)ptr, size}; + hook_invoke_dalloc(hook_dalloc_realloc, ptr, args); + + ifree(tsd, ptr, tcache, true); + + LOG("core.realloc.exit", "result: %p", NULL); + return NULL; + } + size = 1; + } + + if (likely(ptr != NULL)) { + assert(malloc_initialized() || IS_INITIALIZER); + tsd_t *tsd = tsd_fetch(); + + check_entry_exit_locking(tsd_tsdn(tsd)); + + + hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, + (uintptr_t)arg_size, 0, 0}}; + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != SC_NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + if (config_prof && opt_prof) { + usize = sz_s2u(size); + if (unlikely(usize == 0 + || usize > SC_LARGE_MAXCLASS)) { + ret = NULL; + } else { + ret = irealloc_prof(tsd, ptr, old_usize, usize, + &alloc_ctx, &hook_args); + } + } else { + if (config_stats) { + usize = sz_s2u(size); + } + ret = iralloc(tsd, ptr, old_usize, size, 0, false, + &hook_args); + } + tsdn = tsd_tsdn(tsd); + } else { + /* realloc(NULL, size) is equivalent to malloc(size). 
*/ + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = + ": Error in realloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; + hook_invoke_alloc(hook_alloc_realloc, ret, + (uintptr_t)ret, args); + } + + return ret; + } + + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret)); + tsd = tsdn_tsd(tsdn); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, ret); + check_entry_exit_locking(tsdn); + + LOG("core.realloc.exit", "result: %p", ret); + return ret; +} + +JEMALLOC_NOINLINE +void +free_default(void *ptr) { + UTRACE(ptr, 0, 0); + if (likely(ptr != NULL)) { + /* + * We avoid setting up tsd fully (e.g. tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (likely(tsd_fast(tsd))) { + tsd_assert_fast(tsd); + /* Unconditionally get tcache ptr on fast path. */ + tcache = tsd_tcachep_get(tsd); + ifree(tsd, ptr, tcache, false); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + uintptr_t args_raw[3] = {(uintptr_t)ptr}; + hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + } +} + +JEMALLOC_ALWAYS_INLINE +bool free_fastpath(void *ptr, size_t size, bool size_hint) { + tsd_t *tsd = tsd_get(false); + if (unlikely(!tsd || !tsd_fast(tsd))) { + return false; + } + + tcache_t *tcache = tsd_tcachep_get(tsd); + + alloc_ctx_t alloc_ctx; + /* + * If !config_cache_oblivious, we can check PAGE alignment to + * detect sampled objects. Otherwise addresses are + * randomized, and we have to look it up in the rtree anyway. + * See also isfree(). + */ + if (!size_hint || config_cache_oblivious) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree, + rtree_ctx, (uintptr_t)ptr, + &alloc_ctx.szind, &alloc_ctx.slab); + + /* Note: profiled objects will have alloc_ctx.slab set */ + if (!res || !alloc_ctx.slab) { + return false; + } + assert(alloc_ctx.szind != SC_NSIZES); + } else { + /* + * Check for both sizes that are too large, and for sampled objects. + * Sampled objects are always page-aligned. The sampled object check + * will also check for null ptr. 
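A small illustration (assuming a fixed 4 KiB page and hypothetical names) of the alignment test applied above when a size hint is given and cache_oblivious is disabled: small slab objects are never page-aligned, so a page-aligned pointer (including NULL) must be large or sampled and is sent back to the slow path.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE		((uintptr_t)4096)	/* assumed page size */
#define EXAMPLE_PAGE_MASK	(EXAMPLE_PAGE - 1)

static bool
must_use_slow_free(const void *ptr, size_t size, size_t lookup_maxclass) {
	/*
	 * Too large for the size lookup table, or page-aligned (large,
	 * sampled, or NULL): punt to the slow path.
	 */
	return size > lookup_maxclass ||
	    (((uintptr_t)ptr & EXAMPLE_PAGE_MASK) == 0);
}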
+ */ + if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) { + return false; + } + alloc_ctx.szind = sz_size2index_lookup(size); + } + + if (unlikely(ticker_trytick(&tcache->gc_ticker))) { + return false; + } + + cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind); + cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind]; + if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) { + return false; + } + + if (config_stats) { + size_t usize = sz_index2size(alloc_ctx.szind); + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + return true; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) { + LOG("core.free.entry", "ptr: %p", ptr); + + if (!free_fastpath(ptr, 0, false)) { + free_default(ptr); + } + + LOG("core.free.exit", ""); +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, + size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.min_alignment = 1; + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + sopts.null_out_result_on_error = true; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {alignment, size}; + hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, + args); + } + + LOG("core.memalign.exit", "result: %p", ret); + return ret; +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.valloc.entry", "size: %zu\n", size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.null_out_result_on_error = true; + sopts.min_alignment = PAGE; + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = PAGE; + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {size}; + hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args); + } + + LOG("core.valloc.exit", "result: %p\n", ret); + return ret; +} +#endif + +#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. 
+ */ +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = + je_memalign; +# endif + +# ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void* ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. + */ + +#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API + +#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y +#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y) \ + JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) + +typedef struct { + void *ptr; + size_t size; +} smallocx_return_t; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +smallocx_return_t JEMALLOC_NOTHROW +/* + * The attribute JEMALLOC_ATTR(malloc) cannot be used due to: + * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488 + */ +JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT) + (size_t size, int flags) { + /* + * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be + * used here because it makes writing beyond the `size` + * of the `ptr` undefined behavior, but the objective + * of this function is to allow writing beyond `size` + * up to `smallocx_return_t::size`. 
+ */ + smallocx_return_t ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = ": Error in mallocx(): out of memory\n"; + sopts.usize = true; + + dopts.result = &ret.ptr; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } + + dopts.zero = MALLOCX_ZERO_GET(flags); + + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } + + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); + } + + imalloc(&sopts, &dopts); + assert(dopts.usize == je_nallocx(size, flags)); + ret.size = dopts.usize; + + LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size); + return ret; +} +#undef JEMALLOC_SMALLOCX_CONCAT_HELPER +#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2 +#endif + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = ": Error in mallocx(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } + + dopts.zero = MALLOCX_ZERO_GET(flags); + + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } + + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); + } + + imalloc(&sopts, &dopts); + if (sopts.slow) { + uintptr_t args[3] = {size, flags}; + hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, + args); + } + + LOG("core.mallocx.exit", "result: %p", ret); + return ret; +} + +static void * +irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { + void *p; + + if (tctx == NULL) { + return NULL; + } + if (usize <= SC_SMALL_MAXCLASS) { + p = iralloct(tsdn, old_ptr, old_usize, + SC_LARGE_MINCLASS, alignment, zero, tcache, + arena, hook_args); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsdn, p, usize); + } else { + p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, + tcache, arena, hook_args); + } + + return p; +} + +JEMALLOC_ALWAYS_INLINE void * +irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + 
prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, + *usize, alignment, zero, tcache, arena, tctx, hook_args); + } else { + p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, + zero, tcache, arena, hook_args); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, false); + return NULL; + } + + if (p == old_ptr && alignment != 0) { + /* + * The allocation did not move, so it is possible that the size + * class is smaller than would guarantee the requested + * alignment, and that the alignment constraint was + * serendipitously satisfied. Additionally, old_usize may not + * be the same as the current usize because of in-place large + * reallocation. Therefore, query the actual value of usize. + */ + *usize = isalloc(tsd_tsdn(tsd), p); + } + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, + old_usize, old_tctx); + + return p; +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) { + void *p; + tsd_t *tsd; + size_t usize; + size_t old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + arena_t *arena; + tcache_t *tcache; + + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) { + goto label_oom; + } + } else { + arena = NULL; + } + + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + tcache = tcache_get(tsd); + } + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != SC_NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + + hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, + 0}}; + if (config_prof && opt_prof) { + usize = (alignment == 0) ? 
+ sz_s2u(size) : sz_sa2u(size, alignment); + if (unlikely(usize == 0 + || usize > SC_LARGE_MAXCLASS)) { + goto label_oom; + } + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena, &alloc_ctx, &hook_args); + if (unlikely(p == NULL)) { + goto label_oom; + } + } else { + p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, + zero, tcache, arena, &hook_args); + if (unlikely(p == NULL)) { + goto label_oom; + } + if (config_stats) { + usize = isalloc(tsd_tsdn(tsd), p); + } + } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, p); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", p); + return p; +label_oom: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", NULL); + return NULL; +} + +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { + size_t newsize; + + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, + &newsize)) { + return old_usize; + } + + return newsize; +} + +static size_t +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { + size_t usize; + + if (tctx == NULL) { + return old_usize; + } + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); + + return usize; +} + +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + /* + * usize isn't knowable before ixalloc() returns when extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + if (alignment == 0) { + usize_max = sz_s2u(size+extra); + assert(usize_max > 0 + && usize_max <= SC_LARGE_MAXCLASS); + } else { + usize_max = sz_sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 + || usize_max > SC_LARGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. 
+ */ + usize_max = SC_LARGE_MAXCLASS; + } + } + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return usize; + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); + + return usize; +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { + tsd_t *tsd; + size_t usize, old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + + LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " + "flags: %d", ptr, size, extra, flags); + + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != SC_NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding SC_LARGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. 
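+ * For example, if size == SC_LARGE_MAXCLASS - PAGE and extra == 4 * PAGE,
+ * the check below clamps extra to PAGE, keeping (size + extra) within
+ * SC_LARGE_MAXCLASS for the helper functions.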
+ */ + if (unlikely(size > SC_LARGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { + extra = SC_LARGE_MAXCLASS - size; + } + + if (config_prof && opt_prof) { + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero, &alloc_ctx); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (unlikely(usize == old_usize)) { + goto label_not_resized; + } + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } +label_not_resized: + if (unlikely(!tsd_fast(tsd))) { + uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; + hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, + usize, (uintptr_t)usize, args); + } + + UTRACE(ptr, size, ptr); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.xallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, int flags) { + size_t usize; + tsdn_t *tsdn; + + LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); + + assert(malloc_initialized() || IS_INITIALIZER); + assert(ptr != NULL); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (config_debug || force_ivsalloc) { + usize = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || usize != 0); + } else { + usize = isalloc(tsdn, ptr); + } + + check_entry_exit_locking(tsdn); + + LOG("core.sallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) { + LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. 
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + ifree(tsd, ptr, tcache, false); + } else { + uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; + hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.dallocx.exit", ""); +} + +JEMALLOC_ALWAYS_INLINE size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) { + check_entry_exit_locking(tsdn); + + size_t usize; + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { + usize = sz_s2u(size); + } else { + usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } + check_entry_exit_locking(tsdn); + return usize; +} + +JEMALLOC_NOINLINE void +sdallocx_default(void *ptr, size_t size, int flags) { + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + size_t usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. */ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + isfree(tsd, ptr, usize, tcache, false); + } else { + uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; + hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); + isfree(tsd, ptr, usize, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) { + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + if (flags !=0 || !free_fastpath(ptr, size, true)) { + sdallocx_default(ptr, size, flags); + } + + LOG("core.sdallocx.exit", ""); +} + +void JEMALLOC_NOTHROW +je_sdallocx_noflags(void *ptr, size_t size) { + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, + size); + + if (!free_fastpath(ptr, size, true)) { + sdallocx_default(ptr, size, 0); + } + + LOG("core.sdallocx.exit", ""); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) { + size_t usize; + tsdn_t *tsdn; + + assert(size != 0); + + if (unlikely(malloc_init())) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > SC_LARGE_MAXCLASS)) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } + + check_entry_exit_locking(tsdn); + LOG("core.nallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT int 
JEMALLOC_NOTHROW +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + tsd_t *tsd; + + LOG("core.mallctl.entry", "name: %s", name); + + if (unlikely(malloc_init())) { + LOG("core.mallctl.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.mallctl.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; + + LOG("core.mallctlnametomib.entry", "name: %s", name); + + if (unlikely(malloc_init())) { + LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_nametomib(tsd, name, mibp, miblenp); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.mallctlnametomib.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + int ret; + tsd_t *tsd; + + LOG("core.mallctlbymib.entry", ""); + + if (unlikely(malloc_init())) { + LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + LOG("core.mallctlbymib.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + tsdn_t *tsdn; + + LOG("core.malloc_stats_print.entry", ""); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + stats_print(write_cb, cbopaque, opts); + check_entry_exit_locking(tsdn); + LOG("core.malloc_stats_print.exit", ""); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { + size_t ret; + tsdn_t *tsdn; + + LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); + + assert(malloc_initialized() || IS_INITIALIZER); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (unlikely(ptr == NULL)) { + ret = 0; + } else { + if (config_debug || force_ivsalloc) { + ret = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || ret != 0); + } else { + ret = isalloc(tsdn, ptr); + } + } + + check_entry_exit_locking(tsdn); + LOG("core.malloc_usable_size.exit", "result: %zu", ret); + return ret; +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +/* + * If an application creates a thread before doing any allocation in the main + * thread, then calls fork(2) in the main thread followed by memory allocation + * in the child process, a race can occur that results in deadlock within the + * child: the main thread may have forked while the created thread had + * partially initialized the allocator. Ordinarily jemalloc prevents + * fork/malloc races via the following functions it registers during + * initialization using pthread_atfork(), but of course that does no good if + * the allocator isn't fully initialized at fork time. The following library + * constructor is a partial solution to this problem. 
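+ * (For reference, the registration performed during initialization is
+ * roughly pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ * jemalloc_postfork_child); the constructor below only forces malloc_init(),
+ * and therefore that registration, to run before the application can fork.)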
It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. + */ +#ifndef JEMALLOC_JET +JEMALLOC_ATTR(constructor) +static void +jemalloc_constructor(void) { + malloc_init(); +} +#endif + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +JEMALLOC_EXPORT void +_malloc_prefork(void) +#endif +{ + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) { + return; + } +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd_witness_tsdp_get(tsd)); + /* Acquire all mutexes in a safe order. */ + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + if (have_background_thread) { + background_thread_prefork0(tsd_tsdn(tsd)); + } + prof_prefork0(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_prefork1(tsd_tsdn(tsd)); + } + /* Break arena prefork into stages to preserve lock order. */ + for (i = 0; i < 8; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + case 3: + arena_prefork3(tsd_tsdn(tsd), arena); + break; + case 4: + arena_prefork4(tsd_tsdn(tsd), arena); + break; + case 5: + arena_prefork5(tsd_tsdn(tsd), arena); + break; + case 6: + arena_prefork6(tsd_tsdn(tsd), arena); + break; + case 7: + arena_prefork7(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } + } + prof_prefork1(tsd_tsdn(tsd)); + tsd_prefork(tsd); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +JEMALLOC_EXPORT void +_malloc_postfork(void) +#endif +{ + tsd_t *tsd; + unsigned i, narenas; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) { + return; + } +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + tsd_postfork_parent(tsd); + + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); + /* Release all mutexes, now that fork() has completed. */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_parent(tsd_tsdn(tsd), arena); + } + } + prof_postfork_parent(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_parent(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_parent(tsd_tsdn(tsd)); + ctl_postfork_parent(tsd_tsdn(tsd)); +} + +void +jemalloc_postfork_child(void) { + tsd_t *tsd; + unsigned i, narenas; + + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + tsd_postfork_child(tsd); + + witness_postfork_child(tsd_witness_tsdp_get(tsd)); + /* Release all mutexes, now that fork() has completed. 
*/
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
+
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+ arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
+ }
+ prof_postfork_child(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_child(tsd_tsdn(tsd));
+ }
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ tcache_postfork_child(tsd_tsdn(tsd));
+ ctl_postfork_child(tsd_tsdn(tsd));
+}
+
+/******************************************************************************/
+
+/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
+ * Returns 1 if the allocation should be moved, and 0 if the allocation should be kept.
+ * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+get_defrag_hint(void* ptr) {
+ assert(ptr != NULL);
+ return iget_defrag_hint(TSDN_NULL, ptr);
+}
diff --git a/deps/jemalloc/src/jemalloc_cpp.cpp b/deps/jemalloc/src/jemalloc_cpp.cpp
new file mode 100644
index 0000000..da0441a
--- /dev/null
+++ b/deps/jemalloc/src/jemalloc_cpp.cpp
@@ -0,0 +1,141 @@
+#include <mutex>
+#include <new>
+
+#define JEMALLOC_CPP_CPP_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+// All operators in this file are exported.
+
+// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
+// thunk?
+//
+// extern __typeof (sdallocx) sdallocx_int
+// __attribute ((alias ("sdallocx"),
+// visibility ("hidden")));
+//
+// ... but it needs to work with jemalloc namespaces.
+
+void *operator new(std::size_t size);
+void *operator new[](std::size_t size);
+void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
+void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
+void operator delete(void *ptr) noexcept;
+void operator delete[](void *ptr) noexcept;
+void operator delete(void *ptr, const std::nothrow_t &) noexcept;
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
+
+#if __cpp_sized_deallocation >= 201309
+/* C++14's sized-delete operators. */
+void operator delete(void *ptr, std::size_t size) noexcept;
+void operator delete[](void *ptr, std::size_t size) noexcept;
+#endif
+
+JEMALLOC_NOINLINE
+static void *
+handleOOM(std::size_t size, bool nothrow) {
+ void *ptr = nullptr;
+
+ while (ptr == nullptr) {
+ std::new_handler handler;
+ // GCC-4.8 and clang 4.0 do not have std::get_new_handler.
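+ // Read the installed handler by swapping a null handler in and then
+ // restoring it immediately; the static mutex serializes this swap across
+ // concurrent handleOOM() calls so none of them observes the temporarily
+ // cleared handler.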
+ {
+ static std::mutex mtx;
+ std::lock_guard<std::mutex> lock(mtx);
+
+ handler = std::set_new_handler(nullptr);
+ std::set_new_handler(handler);
+ }
+ if (handler == nullptr)
+ break;
+
+ try {
+ handler();
+ } catch (const std::bad_alloc &) {
+ break;
+ }
+
+ ptr = je_malloc(size);
+ }
+
+ if (ptr == nullptr && !nothrow)
+ std::__throw_bad_alloc();
+ return ptr;
+}
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+newImpl(std::size_t size) noexcept(IsNoExcept) {
+ void *ptr = je_malloc(size);
+ if (likely(ptr != nullptr))
+ return ptr;
+
+ return handleOOM(size, IsNoExcept);
+}
+
+void *
+operator new(std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new[](std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new(std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+void *
+operator new[](std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+void
+operator delete(void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+#if __cpp_sized_deallocation >= 201309
+
+void
+operator delete(void *ptr, std::size_t size) noexcept {
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx_noflags(ptr, size);
+}
+
+void operator delete[](void *ptr, std::size_t size) noexcept {
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx_noflags(ptr, size);
+}
+
+#endif // __cpp_sized_deallocation
diff --git a/deps/jemalloc/src/large.c b/deps/jemalloc/src/large.c
new file mode 100644
index 0000000..8e7a781
--- /dev/null
+++ b/deps/jemalloc/src/large.c
@@ -0,0 +1,395 @@
+#define JEMALLOC_LARGE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/util.h"
+
+/******************************************************************************/
+
+void *
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
+ assert(usize == sz_s2u(usize));
+
+ return large_palloc(tsdn, arena, usize, CACHELINE, zero);
+}
+
+void *
+large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero) {
+ size_t ausize;
+ extent_t *extent;
+ bool is_zeroed;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ ausize = sz_sa2u(usize, alignment);
+ if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
+ return NULL;
+ }
+
+ if (config_fill && unlikely(opt_zero)) {
+ zero = true;
+ }
+ /*
+ * Copy zero into is_zeroed and pass the copy when allocating the
+ * extent, so that it is possible to make correct junk/zero fill
+ * decisions below, even if is_zeroed ends up true when zero is false.
+ */
+ is_zeroed = zero;
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
+ }
+ if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL) {
+ return NULL;
+ }
+
+ /* See comments in arena_bin_slabs_full_insert(). */
+ if (!arena_is_auto(arena)) {
+ /* Insert extent into large.
*/ + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_append(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (zero) { + assert(is_zeroed); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, + extent_usize_get(extent)); + } + + arena_decay_tick(tsdn, arena); + return extent_addr_get(extent); +} + +static void +large_dalloc_junk_impl(void *ptr, size_t size) { + memset(ptr, JEMALLOC_FREE_JUNK, size); +} +large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; + +static void +large_dalloc_maybe_junk_impl(void *ptr, size_t size) { + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the extent isn't about to be + * unmapped. + */ + if (opt_retain || (have_dss && extent_in_dss(ptr))) { + large_dalloc_junk(ptr, size); + } + } +} +large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = + large_dalloc_maybe_junk_impl; + +static bool +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t diff = extent_size_get(extent) - (usize + sz_large_pad); + + assert(oldusize > usize); + + if (extent_hooks->split == NULL) { + return true; + } + + /* Split excess pages. */ + if (diff != 0) { + extent_t *trail = extent_split_wrapper(tsdn, arena, + &extent_hooks, extent, usize + sz_large_pad, + sz_size2index(usize), false, diff, SC_NSIZES, false); + if (trail == NULL) { + return true; + } + + if (config_fill && unlikely(opt_junk_free)) { + large_dalloc_maybe_junk(extent_addr_get(trail), + extent_size_get(trail)); + } + + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); + } + + arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); + + return false; +} + +static bool +large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, + bool zero) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t trailsize = usize - oldusize; + + if (extent_hooks->merge == NULL) { + return true; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed_trail and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed_trail ends up true when zero is + * false. 
+ */ + bool is_zeroed_trail = zero; + bool commit = true; + extent_t *trail; + bool new_mapping; + if ((trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, extent_past_get(extent), trailsize, 0, + CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL + || (trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, + CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) { + if (config_stats) { + new_mapping = false; + } + } else { + if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, + extent_past_get(extent), trailsize, 0, CACHELINE, false, + SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) { + return true; + } + if (config_stats) { + new_mapping = true; + } + } + + if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); + return true; + } + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, false); + + if (config_stats && new_mapping) { + arena_stats_mapped_add(tsdn, &arena->stats, trailsize); + } + + if (zero) { + if (config_cache_oblivious) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the extent is a multiple + * of CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *) + ((uintptr_t)extent_addr_get(extent) + oldusize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + assert(is_zeroed_trail); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), + JEMALLOC_ALLOC_JUNK, usize - oldusize); + } + + arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); + + return false; +} + +bool +large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= SC_LARGE_MINCLASS + && usize_max >= SC_LARGE_MINCLASS); + + if (usize_max > oldusize) { + /* Attempt to expand the allocation in-place. */ + if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && usize_min > oldusize && + large_ralloc_no_move_expand(tsdn, extent, usize_min, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + + /* + * Avoid moving the allocation if the existing extent size accommodates + * the new size. + */ + if (oldusize >= usize_min && oldusize <= usize_max) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + + /* Attempt to shrink the allocation in-place. 
*/ + if (oldusize > usize_max) { + if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + return true; +} + +static void * +large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { + if (alignment <= CACHELINE) { + return large_malloc(tsdn, arena, usize, zero); + } + return large_palloc(tsdn, arena, usize, alignment, zero); +} + +void * +large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, + size_t alignment, bool zero, tcache_t *tcache, + hook_ralloc_args_t *hook_args) { + extent_t *extent = iealloc(tsdn, ptr); + + size_t oldusize = extent_usize_get(extent); + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= SC_LARGE_MINCLASS + && usize >= SC_LARGE_MINCLASS); + + /* Try to avoid moving the allocation. */ + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { + hook_invoke_expand(hook_args->is_realloc + ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize, + usize, (uintptr_t)ptr, hook_args->args); + return extent_addr_get(extent); + } + + /* + * usize and old size are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, + zero); + if (ret == NULL) { + return NULL; + } + + hook_invoke_alloc(hook_args->is_realloc + ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, + hook_args->args); + hook_invoke_dalloc(hook_args->is_realloc + ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); + + size_t copysize = (usize < oldusize) ? usize : oldusize; + memcpy(ret, extent_addr_get(extent), copysize); + isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); + return ret; +} + +/* + * junked_locked indicates whether the extent's data have been junk-filled, and + * whether the arena's large_mtx is currently held. + */ +static void +large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + bool junked_locked) { + if (!junked_locked) { + /* See comments in arena_bin_slabs_full_insert(). */ + if (!arena_is_auto(arena)) { + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_remove(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + large_dalloc_maybe_junk(extent_addr_get(extent), + extent_usize_get(extent)); + } else { + /* Only hold the large_mtx if necessary. 
*/ + if (!arena_is_auto(arena)) { + malloc_mutex_assert_owner(tsdn, &arena->large_mtx); + extent_list_remove(&arena->large, extent); + } + } + arena_extent_dalloc_large_prep(tsdn, arena, extent); +} + +static void +large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); +} + +void +large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); +} + +void +large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); +} + +void +large_dalloc(tsdn_t *tsdn, extent_t *extent) { + arena_t *arena = extent_arena_get(extent); + large_dalloc_prep_impl(tsdn, arena, extent, false); + large_dalloc_finish_impl(tsdn, arena, extent); + arena_decay_tick(tsdn, arena); +} + +size_t +large_salloc(tsdn_t *tsdn, const extent_t *extent) { + return extent_usize_get(extent); +} + +prof_tctx_t * +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { + return extent_prof_tctx_get(extent); +} + +void +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { + extent_prof_tctx_set(extent, tctx); +} + +void +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { + large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); +} + +nstime_t +large_prof_alloc_time_get(const extent_t *extent) { + return extent_prof_alloc_time_get(extent); +} + +void +large_prof_alloc_time_set(extent_t *extent, nstime_t t) { + extent_prof_alloc_time_set(extent, t); +} diff --git a/deps/jemalloc/src/log.c b/deps/jemalloc/src/log.c new file mode 100644 index 0000000..778902f --- /dev/null +++ b/deps/jemalloc/src/log.c @@ -0,0 +1,78 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/log.h" + +char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +atomic_b_t log_init_done = ATOMIC_INIT(false); + +/* + * Returns true if we were able to pick out a segment. Fills in r_segment_end + * with a pointer to the first character after the end of the string. + */ +static const char * +log_var_extract_segment(const char* segment_begin) { + const char *end; + for (end = segment_begin; *end != '\0' && *end != '|'; end++) { + } + return end; +} + +static bool +log_var_matches_segment(const char *segment_begin, const char *segment_end, + const char *log_var_begin, const char *log_var_end) { + assert(segment_begin <= segment_end); + assert(log_var_begin < log_var_end); + + ptrdiff_t segment_len = segment_end - segment_begin; + ptrdiff_t log_var_len = log_var_end - log_var_begin; + /* The special '.' segment matches everything. */ + if (segment_len == 1 && *segment_begin == '.') { + return true; + } + if (segment_len == log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0; + } else if (segment_len < log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0 + && log_var_begin[segment_len] == '.'; + } else { + return false; + } +} + +unsigned +log_var_update_state(log_var_t *log_var) { + const char *log_var_begin = log_var->name; + const char *log_var_end = log_var->name + strlen(log_var->name); + + /* Pointer to one before the beginning of the current segment. */ + const char *segment_begin = log_var_names; + + /* + * If log_init done is false, we haven't parsed the malloc conf yet. 
To + * avoid log-spew, we default to not displaying anything. + */ + if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { + return LOG_INITIALIZED_NOT_ENABLED; + } + + while (true) { + const char *segment_end = log_var_extract_segment( + segment_begin); + assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); + if (log_var_matches_segment(segment_begin, segment_end, + log_var_begin, log_var_end)) { + atomic_store_u(&log_var->state, LOG_ENABLED, + ATOMIC_RELAXED); + return LOG_ENABLED; + } + if (*segment_end == '\0') { + /* Hit the end of the segment string with no match. */ + atomic_store_u(&log_var->state, + LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); + return LOG_INITIALIZED_NOT_ENABLED; + } + /* Otherwise, skip the delimiter and continue. */ + segment_begin = segment_end + 1; + } +} diff --git a/deps/jemalloc/src/malloc_io.c b/deps/jemalloc/src/malloc_io.c new file mode 100644 index 0000000..d7cb0f5 --- /dev/null +++ b/deps/jemalloc/src/malloc_io.c @@ -0,0 +1,675 @@ +#define JEMALLOC_MALLOC_IO_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +#ifdef assert +# undef assert +#endif +#ifdef not_reached +# undef not_reached +#endif +#ifdef not_implemented +# undef not_implemented +#endif +#ifdef assert_not_implemented +# undef assert_not_implemented +#endif + +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_write(": Failed assertion\n"); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + if (config_debug) { \ + malloc_write(": Unreachable code reached\n"); \ + abort(); \ + } \ + unreachable(); \ +} while (0) + +#define not_implemented() do { \ + if (config_debug) { \ + malloc_write(": Not implemented\n"); \ + abort(); \ + } \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, + size_t *slen_p); +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, + size_t *slen_p); + +/******************************************************************************/ + +/* malloc_message() setup. */ +static void +wrtmessage(void *cbopaque, const char *s) { + malloc_write_fd(STDERR_FILENO, s, strlen(s)); +} + +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); + +/* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. + */ +void +malloc_write(const char *s) { + if (je_malloc_message != NULL) { + je_malloc_message(NULL, s); + } else { + wrtmessage(NULL, s); + } +} + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. 
+ */ +int +buferror(int err, char *buf, size_t buflen) { +#ifdef _WIN32 + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, + (LPSTR)buf, (DWORD)buflen, NULL); + return 0; +#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) + char *b = strerror_r(err, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return 0; +#else + return strerror_r(err, buf, buflen); +#endif +} + +uintmax_t +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { + uintmax_t ret, digit; + unsigned b; + bool neg; + const char *p, *ns; + + p = nptr; + if (base < 0 || base == 1 || base > 36) { + ns = p; + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + b = base; + + /* Swallow leading whitespace and get sign, if any. */ + neg = false; + while (true) { + switch (*p) { + case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + p++; + break; + case '-': + neg = true; + /* Fall through. */ + case '+': + p++; + /* Fall through. */ + default: + goto label_prefix; + } + } + + /* Get prefix, if any. */ + label_prefix: + /* + * Note where the first non-whitespace/sign character is so that it is + * possible to tell whether any digits are consumed (e.g., " 0" vs. + * " -x"). + */ + ns = p; + if (*p == '0') { + switch (p[1]) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': + if (b == 0) { + b = 8; + } + if (b == 8) { + p++; + } + break; + case 'X': case 'x': + switch (p[2]) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + if (b == 0) { + b = 16; + } + if (b == 16) { + p += 2; + } + break; + default: + break; + } + break; + default: + p++; + ret = 0; + goto label_return; + } + } + if (b == 0) { + b = 10; + } + + /* Convert. */ + ret = 0; + while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) + || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) + || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { + uintmax_t pret = ret; + ret *= b; + ret += digit; + if (ret < pret) { + /* Overflow. */ + set_errno(ERANGE); + ret = UINTMAX_MAX; + goto label_return; + } + p++; + } + if (neg) { + ret = (uintmax_t)(-((intmax_t)ret)); + } + + if (p == ns) { + /* No conversion performed. */ + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + +label_return: + if (endptr != NULL) { + if (p == ns) { + /* No characters were converted. */ + *endptr = (char *)nptr; + } else { + *endptr = (char *)p; + } + } + return ret; +} + +static char * +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { + unsigned i; + + i = U2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: { + const char *digits = (uppercase) + ? "0123456789ABCDEF" + : "0123456789abcdef"; + + do { + i--; + s[i] = digits[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + } default: { + const char *digits = (uppercase) + ? 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + : "0123456789abcdefghijklmnopqrstuvwxyz"; + + assert(base >= 2 && base <= 36); + do { + i--; + s[i] = digits[x % (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + }} + + *slen_p = U2S_BUFSIZE - 1 - i; + return &s[i]; +} + +static char * +d2s(intmax_t x, char sign, char *s, size_t *slen_p) { + bool neg; + + if ((neg = (x < 0))) { + x = -x; + } + s = u2s(x, 10, false, s, slen_p); + if (neg) { + sign = '-'; + } + switch (sign) { + case '-': + if (!neg) { + break; + } + /* Fall through. */ + case ' ': + case '+': + s--; + (*slen_p)++; + *s = sign; + break; + default: not_reached(); + } + return s; +} + +static char * +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { + s = u2s(x, 8, false, s, slen_p); + if (alt_form && *s != '0') { + s--; + (*slen_p)++; + *s = '0'; + } + return s; +} + +static char * +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { + s = u2s(x, 16, uppercase, s, slen_p); + if (alt_form) { + s -= 2; + (*slen_p) += 2; + memcpy(s, uppercase ? "0X" : "0x", 2); + } + return s; +} + +size_t +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { + size_t i; + const char *f; + +#define APPEND_C(c) do { \ + if (i < size) { \ + str[i] = (c); \ + } \ + i++; \ +} while (0) +#define APPEND_S(s, slen) do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ +} while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ + (size_t)width - slen : 0); \ + if (!left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + APPEND_C(' '); \ + } \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + APPEND_C(' '); \ + } \ + } \ +} while (0) +#define GET_ARG_NUMERIC(val, len) do { \ + switch ((unsigned char)len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case '?' | 0x80: \ + val = va_arg(ap, unsigned int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'l' | 0x80: \ + val = va_arg(ap, unsigned long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'q' | 0x80: \ + val = va_arg(ap, unsigned long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'z' | 0x80: \ + val = va_arg(ap, size_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: \ + not_reached(); \ + val = 0; \ + } \ +} while (0) + + i = 0; + f = format; + while (true) { + switch (*f) { + case '\0': goto label_out; + case '%': { + bool alt_form = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; + unsigned char len = '?'; + char *s; + size_t slen; + + f++; + /* Flags. */ + while (true) { + switch (*f) { + case '#': + assert(!alt_form); + alt_form = true; + break; + case '-': + assert(!left_justify); + left_justify = true; + break; + case ' ': + assert(!plus_space); + plus_space = true; + break; + case '+': + assert(!plus_plus); + plus_plus = true; + break; + default: goto label_width; + } + f++; + } + /* Width. 
*/ + label_width: + switch (*f) { + case '*': + width = va_arg(ap, int); + f++; + if (width < 0) { + left_justify = true; + width = -width; + } + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uwidth; + set_errno(0); + uwidth = malloc_strtoumax(f, (char **)&f, 10); + assert(uwidth != UINTMAX_MAX || get_errno() != + ERANGE); + width = (int)uwidth; + break; + } default: + break; + } + /* Width/precision separator. */ + if (*f == '.') { + f++; + } else { + goto label_length; + } + /* Precision. */ + switch (*f) { + case '*': + prec = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uprec; + set_errno(0); + uprec = malloc_strtoumax(f, (char **)&f, 10); + assert(uprec != UINTMAX_MAX || get_errno() != + ERANGE); + prec = (int)uprec; + break; + } + default: break; + } + /* Length. */ + label_length: + switch (*f) { + case 'l': + f++; + if (*f == 'l') { + len = 'q'; + f++; + } else { + len = 'l'; + } + break; + case 'q': case 'j': case 't': case 'z': + len = *f; + f++; + break; + default: break; + } + /* Conversion specifier. */ + switch (*f) { + case '%': + /* %% */ + APPEND_C(*f); + f++; + break; + case 'd': case 'i': { + intmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[D2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = d2s(val, (plus_plus ? '+' : (plus_space ? + ' ' : '-')), buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'o': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[O2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = o2s(val, alt_form, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'u': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[U2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = u2s(val, 10, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'x': case 'X': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = x2s(val, alt_form, *f == 'X', buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'c': { + unsigned char val; + char buf[2]; + + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + val = va_arg(ap, int); + buf[0] = val; + buf[1] = '\0'; + APPEND_PADDED_S(buf, 1, width, left_justify); + f++; + break; + } case 's': + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + s = va_arg(ap, char *); + slen = (prec < 0) ? strlen(s) : (size_t)prec; + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + case 'p': { + uintmax_t val; + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, 'p'); + s = x2s(val, true, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } default: not_reached(); + } + break; + } default: { + APPEND_C(*f); + f++; + break; + }} + } + label_out: + if (i < size) { + str[i] = '\0'; + } else { + str[size - 1] = '\0'; + } + +#undef APPEND_C +#undef APPEND_S +#undef APPEND_PADDED_S +#undef GET_ARG_NUMERIC + return i; +} + +JEMALLOC_FORMAT_PRINTF(3, 4) +size_t +malloc_snprintf(char *str, size_t size, const char *format, ...) 
{ + size_t ret; + va_list ap; + + va_start(ap, format); + ret = malloc_vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) { + char buf[MALLOC_PRINTF_BUFSIZE]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = (je_malloc_message != NULL) ? je_malloc_message : + wrtmessage; + } + + malloc_vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. + */ +JEMALLOC_FORMAT_PRINTF(3, 4) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* Print to stderr in such a way as to avoid memory allocation. */ +JEMALLOC_FORMAT_PRINTF(1, 2) +void +malloc_printf(const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. + */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented +#include "jemalloc/internal/assert.h" diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c new file mode 100644 index 0000000..3f920f5 --- /dev/null +++ b/deps/jemalloc/src/mutex.c @@ -0,0 +1,223 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/spin.h" + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. 
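+ * With JEMALLOC_LAZY_LOCK, mutex operations are skipped while isthreaded is
+ * false, so single-threaded programs pay no locking overhead. The exported
+ * pthread_create() below simply forwards to pthread_create_wrapper() (defined
+ * elsewhere), which is expected to set isthreaded before the new thread runs.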
+ */ + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +JEMALLOC_EXPORT int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) { + return pthread_create_wrapper(thread, attr, start_routine, arg); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +#endif + +void +malloc_mutex_lock_slow(malloc_mutex_t *mutex) { + mutex_prof_data_t *data = &mutex->prof_data; + nstime_t before = NSTIME_ZERO_INITIALIZER; + + if (ncpus == 1) { + goto label_spin_done; + } + + int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + do { + spin_cpu_spinwait(); + if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED) + && !malloc_mutex_trylock_final(mutex)) { + data->n_spin_acquired++; + return; + } + } while (cnt++ < max_cnt); + + if (!config_stats) { + /* Only spin is useful when stats is off. */ + malloc_mutex_lock_final(mutex); + return; + } +label_spin_done: + nstime_update(&before); + /* Copy before to after to avoid clock skews. */ + nstime_t after; + nstime_copy(&after, &before); + uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, + ATOMIC_RELAXED) + 1; + /* One last try as above two calls may take quite some cycles. */ + if (!malloc_mutex_trylock_final(mutex)) { + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + data->n_spin_acquired++; + return; + } + + /* True slow path. */ + malloc_mutex_lock_final(mutex); + /* Update more slow-path only counters. */ + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + nstime_update(&after); + + nstime_t delta; + nstime_copy(&delta, &after); + nstime_subtract(&delta, &before); + + data->n_wait_times++; + nstime_add(&data->tot_wait_time, &delta); + if (nstime_compare(&data->max_wait_time, &delta) < 0) { + nstime_copy(&data->max_wait_time, &delta); + } + if (n_thds > data->max_n_thds) { + data->max_n_thds = n_thds; + } +} + +static void +mutex_prof_data_init(mutex_prof_data_t *data) { + memset(data, 0, sizeof(mutex_prof_data_t)); + nstime_init(&data->max_wait_time, 0); + nstime_init(&data->tot_wait_time, 0); + data->prev_owner = NULL; +} + +void +malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_assert_owner(tsdn, mutex); + mutex_prof_data_init(&mutex->prof_data); +} + +static int +mutex_addr_comp(const witness_t *witness1, void *mutex1, + const witness_t *witness2, void *mutex2) { + assert(mutex1 != NULL); + assert(mutex2 != NULL); + uintptr_t mu1int = (uintptr_t)mutex1; + uintptr_t mu2int = (uintptr_t)mutex2; + if (mu1int < mu2int) { + return -1; + } else if (mu1int == mu2int) { + return 0; + } else { + return 1; + } +} + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { + mutex_prof_data_init(&mutex->prof_data); +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else + if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, + _CRT_SPINCOUNT)) { + return true; + } +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) { + return true; + } + } +#else + 
pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return true; + } + pthread_mutexattr_destroy(&attr); +#endif + if (config_debug) { + mutex->lock_order = lock_order; + if (lock_order == malloc_mutex_address_ordered) { + witness_init(&mutex->witness, name, rank, + mutex_addr_comp, mutex); + } else { + witness_init(&mutex->witness, name, rank, NULL, NULL); + } + } + return false; +} + +void +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_lock(tsdn, mutex); +} + +void +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_unlock(tsdn, mutex); +} + +void +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { +#ifdef JEMALLOC_MUTEX_INIT_CB + malloc_mutex_unlock(tsdn, mutex); +#else + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank, mutex->lock_order)) { + malloc_printf(": Error re-initializing mutex in " + "child\n"); + if (opt_abort) { + abort(); + } + } +#endif +} + +bool +malloc_mutex_boot(void) { +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, + bootstrap_calloc) != 0) { + return true; + } + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif + return false; +} diff --git a/deps/jemalloc/src/mutex_pool.c b/deps/jemalloc/src/mutex_pool.c new file mode 100644 index 0000000..f24d10e --- /dev/null +++ b/deps/jemalloc/src/mutex_pool.c @@ -0,0 +1,18 @@ +#define JEMALLOC_MUTEX_POOL_C_ + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +bool +mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { + for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { + if (malloc_mutex_init(&pool->mutexes[i], name, rank, + malloc_mutex_address_ordered)) { + return true; + } + } + return false; +} diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c new file mode 100644 index 0000000..71db353 --- /dev/null +++ b/deps/jemalloc/src/nstime.c @@ -0,0 +1,170 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/nstime.h" + +#include "jemalloc/internal/assert.h" + +#define BILLION UINT64_C(1000000000) +#define MILLION UINT64_C(1000000) + +void +nstime_init(nstime_t *time, uint64_t ns) { + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) { + return time->ns; +} + +uint64_t +nstime_msec(const nstime_t *time) { + return time->ns / MILLION; +} + +uint64_t +nstime_sec(const nstime_t *time) { + return time->ns / BILLION; +} + +uint64_t +nstime_nsec(const nstime_t *time) { + return time->ns % BILLION; +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) { + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) { + return (a->ns > b->ns) - (a->ns < b->ns); +} + +void +nstime_add(nstime_t *time, const nstime_t *addend) { + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_iadd(nstime_t *time, uint64_t addend) { + assert(UINT64_MAX - time->ns >= addend); + + time->ns += addend; 
+} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_isubtract(nstime_t *time, uint64_t subtrahend) { + assert(time->ns >= subtrahend); + + time->ns -= subtrahend; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) { + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) { + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) { + assert(divisor->ns != 0); + + return time->ns / divisor->ns; +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) { + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +static bool +nstime_monotonic_impl(void) { + return NSTIME_MONOTONIC; +#undef NSTIME_MONOTONIC +} +nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; + +static bool +nstime_update_impl(nstime_t *time) { + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. */ + if (unlikely(nstime_compare(&old_time, time) > 0)) { + nstime_copy(time, &old_time); + return true; + } + + return false; +} +nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c new file mode 100644 index 0000000..13de27a --- /dev/null +++ b/deps/jemalloc/src/pages.c @@ -0,0 +1,649 @@ +#define JEMALLOC_PAGES_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/pages.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include +#ifdef __FreeBSD__ +#include +#endif +#endif + +/******************************************************************************/ +/* Data. */ + +/* Actual operating system page size, detected during bootstrap, <= PAGE. 
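+ * PAGE is the page size assumed at build time; pages_boot() fails (and
+ * aborts if opt_abort is set) when os_page_detect() reports a larger
+ * run-time page size.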
*/ +static size_t os_page; + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + +const char *thp_mode_names[] = { + "default", + "always", + "never", + "not supported" +}; +thp_mode_t opt_thp = THP_MODE_DEFAULT; +thp_mode_t init_system_thp_mode; + +/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */ +static bool pages_can_purge_lazy_runtime = true; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void os_pages_unmap(void *addr, size_t size); + +/******************************************************************************/ + +static void * +os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + assert(size != 0); + + if (os_overcommits) { + *commit = true; + } + + void *ret; +#ifdef _WIN32 + /* + * If VirtualAlloc can't allocate at the given address when one is + * given, it fails and returns NULL. + */ + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), + PAGE_READWRITE); +#else + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } + assert(ret != NULL); + + if (ret == MAP_FAILED) { + ret = NULL; + } else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + os_pages_unmap(ret, size); + ret = NULL; + } +#endif + assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && + ret == addr)); + return ret; +} + +static void * +os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + os_pages_unmap(addr, alloc_size); + void *new_addr = os_pages_map(ret, size, PAGE, commit); + if (new_addr == ret) { + return ret; + } + if (new_addr != NULL) { + os_pages_unmap(new_addr, size); + } + return NULL; +#else + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) { + os_pages_unmap(addr, leadsize); + } + if (trailsize != 0) { + os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } + return ret; +#endif +} + +static void +os_pages_unmap(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) +#else + if (munmap(addr, size) == -1) +#endif + { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + malloc_printf(": Error in " +#ifdef _WIN32 + "VirtualFree" +#else + "munmap" +#endif + "(): %s\n", buf); + if (opt_abort) { + abort(); + } + } +} + +static void * +pages_map_slow(size_t size, size_t alignment, bool *commit) { + size_t alloc_size = size + alignment - os_page; + /* Beware size_t wrap-around. 
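+ * alloc_size = size + alignment - os_page can only be smaller than size if
+ * the addition wrapped past SIZE_MAX (alignment >= os_page always holds), so
+ * that comparison doubles as the overflow check.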
*/ + if (alloc_size < size) { + return NULL; + } + + void *ret; + do { + void *pages = os_pages_map(NULL, alloc_size, alignment, commit); + if (pages == NULL) { + return NULL; + } + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) + - (uintptr_t)pages; + ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void * +pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(alignment >= PAGE); + assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); + +#if defined(__FreeBSD__) && defined(MAP_EXCL) + /* + * FreeBSD has mechanisms both to mmap at specific address without + * touching existing mappings, and to mmap with specific alignment. + */ + { + if (os_overcommits) { + *commit = true; + } + + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + int flags = mmap_flags; + + if (addr != NULL) { + flags |= MAP_FIXED | MAP_EXCL; + } else { + unsigned alignment_bits = ffs_zu(alignment); + assert(alignment_bits > 1); + flags |= MAP_ALIGNED(alignment_bits - 1); + } + + void *ret = mmap(addr, size, prot, flags, -1, 0); + if (ret == MAP_FAILED) { + ret = NULL; + } + + return ret; + } +#endif + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * os_pages_unmap(), and it can leave holes in the process's virtual + * memory map if memory grows downward. + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. + */ + + void *ret = os_pages_map(addr, size, os_page, commit); + if (ret == NULL || ret == addr) { + return ret; + } + assert(addr == NULL); + if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { + os_pages_unmap(ret, size); + return pages_map_slow(size, alignment, commit); + } + + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void +pages_unmap(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + os_pages_unmap(addr, size); +} + +static bool +pages_commit_impl(void *addr, size_t size, bool commit) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (os_overcommits) { + return true; + } + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + return true; + } + if (result != addr) { + /* + * We succeeded in mapping memory, but not in the right + * place. 
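+ * (With MAP_FIXED a successful mmap() should return addr, so this is a
+ * defensive check; the stray mapping is released and the operation is
+ * reported as failed.)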
+ */ + os_pages_unmap(result, size); + return true; + } + return false; + } +#endif +} + +bool +pages_commit(void *addr, size_t size) { + return pages_commit_impl(addr, size, true); +} + +bool +pages_decommit(void *addr, size_t size) { + return pages_commit_impl(addr, size, false); +} + +bool +pages_purge_lazy(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_lazy) { + return true; + } + if (!pages_can_purge_lazy_runtime) { + /* + * Built with lazy purge enabled, but detected it was not + * supported on the current system. + */ + return true; + } + +#ifdef _WIN32 + VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + return false; +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) + return (madvise(addr, size, +# ifdef MADV_FREE + MADV_FREE +# else + JEMALLOC_MADV_FREE +# endif + ) != 0); +#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#else + not_reached(); +#endif +} + +bool +pages_purge_forced(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_forced) { + return true; + } + +#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_MAPS_COALESCE) + /* Try to overlay a new demand-zeroed mapping. */ + return pages_commit(addr, size); +#else + not_reached(); +#endif +} + +static bool +pages_huge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return true; +#endif +} + +bool +pages_huge(void *addr, size_t size) { + return pages_huge_impl(addr, size, true); +} + +static bool +pages_huge_unaligned(void *addr, size_t size) { + return pages_huge_impl(addr, size, false); +} + +static bool +pages_nohuge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } + +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return false; +#endif +} + +bool +pages_nohuge(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, true); +} + +static bool +pages_nohuge_unaligned(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, false); +} + +bool +pages_dontdump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DONTDUMP) != 0; +#else + return false; +#endif +} + +bool +pages_dodump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DODUMP) != 0; +#else + return false; +#endif +} + + +static size_t +os_page_detect(void) { +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#elif defined(__FreeBSD__) + /* + * This returns the value obtained from + * the auxv vector, avoiding a syscall. 
+ */ + return getpagesize(); +#else + long result = sysconf(_SC_PAGESIZE); + if (result == -1) { + return LG_PAGE; + } + return (size_t)result; +#endif +} + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) { + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); +#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) + int mib[2]; + + mib[0] = CTL_VM; + mib[1] = VM_OVERCOMMIT; + if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#else + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#endif + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. + */ +static bool +os_overcommits_proc(void) { + int fd; + char buf[1]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | + O_CLOEXEC); + #else + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#else + #if defined(O_CLOEXEC) + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#endif + + if (fd == -1) { + return false; /* Error. */ + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) { + return false; /* Error. */ + } + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. 
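+ *
+ * Modes 0 and 1 are treated as "overcommit in effect", so this returns true
+ * for them and false for mode 2 (or when the file cannot be read).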
+ */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_set_thp_state (void *ptr, size_t size) { + if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) { + return; + } + assert(opt_thp != thp_mode_not_supported && + init_system_thp_mode != thp_mode_not_supported); + + if (opt_thp == thp_mode_always + && init_system_thp_mode != thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default); + pages_huge_unaligned(ptr, size); + } else if (opt_thp == thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default || + init_system_thp_mode == thp_mode_always); + pages_nohuge_unaligned(ptr, size); + } +} + +static void +init_thp_state(void) { + if (!have_madvise_huge) { + if (metadata_thp_enabled() && opt_abort) { + malloc_write(": no MADV_HUGEPAGE support\n"); + abort(); + } + goto label_error; + } + + static const char sys_state_madvise[] = "always [madvise] never\n"; + static const char sys_state_always[] = "[always] madvise never\n"; + static const char sys_state_never[] = "always madvise [never]\n"; + char buf[sizeof(sys_state_madvise)]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + int fd = (int)syscall(SYS_open, + "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#else + int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#endif + if (fd == -1) { + goto label_error; + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 0) { + goto label_error; + } + + if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_default; + } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_always; + } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_never; + } else { + goto label_error; + } + return; +label_error: + opt_thp = init_system_thp_mode = thp_mode_not_supported; +} + +bool +pages_boot(void) { + os_page = os_page_detect(); + if (os_page > PAGE) { + malloc_write(": Unsupported system page size\n"); + if (opt_abort) { + abort(); + } + return true; + } + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) { + mmap_flags |= MAP_NORESERVE; + } +# endif +#else + os_overcommits = false; +#endif + + init_thp_state(); + +#ifdef __FreeBSD__ + /* + * FreeBSD doesn't need the check; madvise(2) is known to work. + */ +#else + /* Detect lazy purge runtime support. 
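+ * The probe maps one page, attempts pages_purge_lazy() on it, and clears
+ * pages_can_purge_lazy_runtime if that fails, then unmaps the page.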
*/ + if (pages_can_purge_lazy) { + bool committed = false; + void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed); + if (madv_free_page == NULL) { + return true; + } + assert(pages_can_purge_lazy_runtime); + if (pages_purge_lazy(madv_free_page, PAGE)) { + pages_can_purge_lazy_runtime = false; + } + os_pages_unmap(madv_free_page, PAGE); + } +#endif + + return false; +} diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c new file mode 100644 index 0000000..83c04bf --- /dev/null +++ b/deps/jemalloc/src/prng.c @@ -0,0 +1,3 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c new file mode 100644 index 0000000..13334cb --- /dev/null +++ b/deps/jemalloc/src/prof.c @@ -0,0 +1,3160 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/emitter.h" + +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace +#include +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook) +#endif + +/******************************************************************************/ +/* Data. */ + +bool opt_prof = false; +bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_final = false; +bool opt_prof_leak = false; +bool opt_prof_accum = false; +bool opt_prof_log = false; +char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* + * Initialized as opt_prof_active, and accessed via + * prof_active_[gs]et{_unlocked,}(). + */ +bool prof_active; +static malloc_mutex_t prof_active_mtx; + +/* + * Initialized as opt_prof_thread_active_init, and accessed via + * prof_thread_active_init_[gs]et(). + */ +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; + +/* + * Initialized as opt_prof_gdump, and accessed via + * prof_gdump_[gs]et{_unlocked,}(). + */ +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; + +uint64_t prof_interval = 0; + +size_t lg_prof_sample; + +typedef enum prof_logging_state_e prof_logging_state_t; +enum prof_logging_state_e { + prof_logging_state_stopped, + prof_logging_state_started, + prof_logging_state_dumping +}; + +/* + * - stopped: log_start never called, or previous log_stop has completed. + * - started: log_start called, log_stop not called yet. Allocations are logged. + * - dumping: log_stop called but not finished; samples are not logged anymore. 
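+ *
+ * I.e. log_start() moves stopped -> started, and log_stop() moves started ->
+ * dumping while the accumulated data is written out, then back to stopped.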
+ */ +prof_logging_state_t prof_logging_state = prof_logging_state_stopped; + +#ifdef JEMALLOC_JET +static bool prof_log_dummy = false; +#endif + +/* Incremented for every log file that is output. */ +static uint64_t log_seq = 0; +static char log_filename[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Timestamp for most recent call to log_start(). */ +static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER; + +/* Increment these when adding to the log_bt and log_thr linked lists. */ +static size_t log_bt_index = 0; +static size_t log_thr_index = 0; + +/* Linked list node definitions. These are only used in prof.c. */ +typedef struct prof_bt_node_s prof_bt_node_t; + +struct prof_bt_node_s { + prof_bt_node_t *next; + size_t index; + prof_bt_t bt; + /* Variable size backtrace vector pointed to by bt. */ + void *vec[1]; +}; + +typedef struct prof_thr_node_s prof_thr_node_t; + +struct prof_thr_node_s { + prof_thr_node_t *next; + size_t index; + uint64_t thr_uid; + /* Variable size based on thr_name_sz. */ + char name[1]; +}; + +typedef struct prof_alloc_node_s prof_alloc_node_t; + +/* This is output when logging sampled allocations. */ +struct prof_alloc_node_s { + prof_alloc_node_t *next; + /* Indices into an array of thread data. */ + size_t alloc_thr_ind; + size_t free_thr_ind; + + /* Indices into an array of backtraces. */ + size_t alloc_bt_ind; + size_t free_bt_ind; + + uint64_t alloc_time_ns; + uint64_t free_time_ns; + + size_t usize; +}; + +/* + * Created on the first call to prof_log_start and deleted on prof_log_stop. + * These are the backtraces and threads that have already been logged by an + * allocation. + */ +static bool log_tables_initialized = false; +static ckh_t log_bt_node_set; +static ckh_t log_thr_node_set; + +/* Store linked lists for logged data. */ +static prof_bt_node_t *log_bt_first = NULL; +static prof_bt_node_t *log_bt_last = NULL; +static prof_thr_node_t *log_thr_first = NULL; +static prof_thr_node_t *log_thr_last = NULL; +static prof_alloc_node_t *log_alloc_first = NULL; +static prof_alloc_node_t *log_alloc_last = NULL; + +/* Protects the prof_logging_state and any log_{...} variable. */ +static malloc_mutex_t log_mtx; + +/* + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ +static malloc_mutex_t *gctx_locks; +static atomic_u_t cum_gctxs; /* Atomic counter. */ + +/* + * Table of mutexes that are shared among tdata's. No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +static malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; +/* Non static to enable profiling. */ +malloc_mutex_t bt2gctx_mtx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. 
+ */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; + +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; + +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; +static size_t prof_dump_buf_end; +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); + +/* Hashtable functions for log_bt_node_set and log_thr_node_set. */ +static void prof_thr_node_hash(const void *key, size_t r_hash[2]); +static bool prof_thr_node_keycomp(const void *k1, const void *k2); +static void prof_bt_node_hash(const void *key, size_t r_hash[2]); +static bool prof_bt_node_keycomp(const void *k1, const void *k2); + +/******************************************************************************/ +/* Red-black trees. */ + +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return ret; +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); + } + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return ret; +} + +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +void +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { + prof_tdata_t *tdata; + + cassert(config_prof); + + if (updated) { + /* + * Compute a new sample threshold. This isn't very important in + * practice, because this function is rarely executed, so the + * potential for sample bias is minimal except in contrived + * programs. + */ + tdata = prof_tdata_get(tsd, true); + if (tdata != NULL) { + prof_sample_threshold_update(tdata); + } + } + + if ((uintptr_t)tctx > (uintptr_t)1U) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + tctx->prepared = false; + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } + } +} + +void +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + prof_tctx_set(tsdn, ptr, usize, NULL, tctx); + + /* Get the current time and set this in the extent_t. We'll read this + * when free() is called. */ + nstime_t t = NSTIME_ZERO_INITIALIZER; + nstime_update(&t); + prof_alloc_time_set(tsdn, ptr, NULL, t); + + malloc_mutex_lock(tsdn, tctx->tdata->lock); + tctx->cnts.curobjs++; + tctx->cnts.curbytes += usize; + if (opt_prof_accum) { + tctx->cnts.accumobjs++; + tctx->cnts.accumbytes += usize; + } + tctx->prepared = false; + malloc_mutex_unlock(tsdn, tctx->tdata->lock); +} + +static size_t +prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_bt_node_t dummy_node; + dummy_node.bt = *bt; + prof_bt_node_t *node; + + /* See if this backtrace is already cached in the table. */ + if (ckh_search(&log_bt_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_bt_node_t, vec) + + (bt->len * sizeof(void *)); + prof_bt_node_t *new_node = (prof_bt_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_bt_first == NULL) { + log_bt_first = new_node; + log_bt_last = new_node; + } else { + log_bt_last->next = new_node; + log_bt_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_bt_index; + /* + * Copy the backtrace: bt is inside a tdata or gctx, which + * might die before prof_log_stop is called. 
+ */ + new_node->bt.len = bt->len; + memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *)); + new_node->bt.vec = new_node->vec; + + log_bt_index++; + ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} +static size_t +prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) { + assert(prof_logging_state == prof_logging_state_started); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx); + + prof_thr_node_t dummy_node; + dummy_node.thr_uid = thr_uid; + prof_thr_node_t *node; + + /* See if this thread is already cached in the table. */ + if (ckh_search(&log_thr_node_set, (void *)(&dummy_node), + (void **)(&node), NULL)) { + size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1; + prof_thr_node_t *new_node = (prof_thr_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, + true, arena_get(TSDN_NULL, 0, true), true); + if (log_thr_first == NULL) { + log_thr_first = new_node; + log_thr_last = new_node; + } else { + log_thr_last->next = new_node; + log_thr_last = new_node; + } + + new_node->next = NULL; + new_node->index = log_thr_index; + new_node->thr_uid = thr_uid; + strcpy(new_node->name, name); + + log_thr_index++; + ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL); + return new_node->index; + } else { + return node->index; + } +} + +static void +prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false); + if (cons_tdata == NULL) { + /* + * We decide not to log these allocations. cons_tdata will be + * NULL only when the current thread is in a weird state (e.g. + * it's being destroyed). + */ + return; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + goto label_done; + } + + if (!log_tables_initialized) { + bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp); + bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp); + if (err1 || err2) { + goto label_done; + } + log_tables_initialized = true; + } + + nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr, + (alloc_ctx_t *)NULL); + nstime_t free_time = NSTIME_ZERO_INITIALIZER; + nstime_update(&free_time); + + size_t sz = sizeof(prof_alloc_node_t); + prof_alloc_node_t *new_node = (prof_alloc_node_t *) + iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + + const char *prod_thr_name = (tctx->tdata->thread_name == NULL)? + "" : tctx->tdata->thread_name; + const char *cons_thr_name = prof_thread_name_get(tsd); + + prof_bt_t bt; + /* Initialize the backtrace, using the buffer in tdata to store it. */ + bt_init(&bt, cons_tdata->vec); + prof_backtrace(&bt); + prof_bt_t *cons_bt = &bt; + + /* We haven't destroyed tctx yet, so gctx should be good to read. 
*/ + prof_bt_t *prod_bt = &tctx->gctx->bt; + + new_node->next = NULL; + new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid, + prod_thr_name); + new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid, + cons_thr_name); + new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt); + new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt); + new_node->alloc_time_ns = nstime_ns(&alloc_time); + new_node->free_time_ns = nstime_ns(&free_time); + new_node->usize = usize; + + if (log_alloc_first == NULL) { + log_alloc_first = new_node; + log_alloc_last = new_node; + } else { + log_alloc_last->next = new_node; + log_alloc_last = new_node; + } + +label_done: + malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx); +} + +void +prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs > 0); + assert(tctx->cnts.curbytes >= usize); + tctx->cnts.curobjs--; + tctx->cnts.curbytes -= usize; + + prof_try_log(tsd, ptr, usize, tctx); + + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } +} + +void +bt_init(prof_bt_t *bt, void **vec) { + cassert(config_prof); + + bt->vec = vec; + bt->len = 0; +} + +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void +prof_backtrace(prof_bt_t *bt) { + int nframes; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + nframes = unw_backtrace(bt->vec, PROF_BT_MAX); + if (nframes <= 0) { + return; + } + bt->len = nframes; +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { + cassert(config_prof); + + return _URC_NO_REASON; +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; + + cassert(config_prof); + + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) { + return _URC_END_OF_STACK; + } + data->bt->vec[data->bt->len] = ip; + data->bt->len++; + if (data->bt->len == data->max) { + return _URC_END_OF_STACK; + } + + return _URC_NO_REASON; +} + +void +prof_backtrace(prof_bt_t *bt) { + prof_unwind_data_t data = {bt, PROF_BT_MAX}; + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void +prof_backtrace(prof_bt_t *bt) { +#define BT_FRAME(i) \ + if ((i) < PROF_BT_MAX) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) { \ + return; \ + } \ + p = __builtin_return_address(i); \ + if (p == NULL) { \ + return; \ + } \ + bt->vec[(i)] = p; \ + bt->len = (i) + 1; \ + } else { \ + return; \ + } + + cassert(config_prof); + + 
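+ /*
+ * __builtin_frame_address() and __builtin_return_address() require
+ * constant arguments, so the stack walk is unrolled below; each
+ * BT_FRAME(i) stops once i reaches PROF_BT_MAX or the frame chain ends.
+ */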
BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +void +prof_backtrace(prof_bt_t *bt) { + cassert(config_prof); + not_reached(); +} +#endif + +static malloc_mutex_t * +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); + + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; +} + +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; +} + +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. + */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) { + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. 
prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. + */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { + not_reached(); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} + +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
+ */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { + union { + prof_gctx_t *p; + void *v; + } gctx, tgctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; + } + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; + } + } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. */ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return NULL; + } + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } + + /* Link a prof_tctx_t into gctx for this thread. 
*/ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + return NULL; + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; + } + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + + return ret.p; +} + +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +void +prof_sample_threshold_update(prof_tdata_t *tdata) { +#ifdef JEMALLOC_PROF + if (!config_prof) { + return; + } + + if (lg_prof_sample == 0) { + tsd_bytes_until_sample_set(tsd_fetch(), 0); + return; + } + + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). 
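+ * For example, with lg_prof_sample == 19 the per-byte sampling probability
+ * is p = 2^-19, so on average one sample is taken per 512 KiB allocated.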
+ * + * __ __ + * | log(u) | 1 + * tdata->bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + */ + uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53); + double u = (double)r * (1.0/9007199254740992.0L); + uint64_t bytes_until_sample = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; + if (bytes_until_sample > SSIZE_MAX) { + bytes_until_sample = SSIZE_MAX; + } + tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample); + +#endif +} + +#ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; +} + +size_t +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; +} + +size_t +prof_bt_count(void) { + size_t bt_count; + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); + + return bt_count; +} +#endif + +static int +prof_dump_open_impl(bool propagate_err, const char *filename) { + int fd; + + fd = creat(filename, 0644); + if (fd == -1 && !propagate_err) { + malloc_printf(": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) { + abort(); + } + } + + return fd; +} +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; + +static bool +prof_dump_flush(bool propagate_err) { + bool ret = false; + ssize_t err; + + cassert(config_prof); + + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (!propagate_err) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) { + abort(); + } + } + ret = true; + } + prof_dump_buf_end = 0; + + return ret; +} + +static bool +prof_dump_close(bool propagate_err) { + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + + return ret; +} + +static bool +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, n; + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } + + if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + assert(i == slen); + + return false; +} + +JEMALLOC_FORMAT_PRINTF(2, 3) +static bool +prof_dump_printf(bool propagate_err, const char *format, ...) 
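/*
 * Illustrative sketch (not part of the upstream sources): the sampling
 * interval computed by prof_sample_threshold_update() above, restated as a
 * standalone function.  rand() stands in for jemalloc's prng_lg_range_u64();
 * the math is the documented
 *   bytes_until_sample = ceil(log(u) / log(1 - 2^-lg_prof_sample))
 * which yields a geometric variable with mean 2^lg_prof_sample bytes.
 */
#include <math.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t
sample_interval_sketch(unsigned lg_prof_sample) {
    /* u uniform in (0, 1]; the +1.0 keeps u away from 0 so log(u) is finite. */
    double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 1.0);
    double p = 1.0 / (double)((uint64_t)1U << lg_prof_sample);
    return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}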
{ + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + + return ret; +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +static void +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) { + return tctx; + } + break; + default: + not_reached(); + } + return NULL; +} + +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. 
*/ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { + cassert(config_prof); + + malloc_mutex_lock(tsdn, gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. + */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(tsdn, gctx->lock); +} + +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); + + return NULL; +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. + */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } + } +} + +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } + + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else { + tdata->dumping = false; + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); + + return NULL; +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, 
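/*
 * Editorial summary (not from the upstream sources) of the tctx state
 * machine that the dump path above relies on:
 *
 *   nominal   --> dumping     in prof_tctx_merge_tdata(), once its counters
 *                             have been snapshotted into dump_cnts;
 *   dumping   --> nominal     in prof_tctx_finish_iter(), when the dump is
 *                             done with it;
 *   purgatory --> (freed)     prof_tctx_finish_iter() hands it back and
 *                             prof_gctx_finish() removes and deallocates it.
 *
 * "initializing" tctx's are skipped entirely; a tctx is parked in purgatory
 * by prof_tctx_destroy() when it cannot be freed immediately because a dump
 * is in flight.
 */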
prof_tdata_t *tdata, + void *arg) { + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) { + return NULL; + } + + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? tdata->thread_name : "")) { + return tdata; + } + return NULL; +} + +static bool +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + bool ret; + + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } + + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; +} +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; + +static bool +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { + bool ret; + unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; + + cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); + ret = false; + goto label_return; + } + + if (prof_dump_printf(propagate_err, "@")) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return ret; +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) 
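/*
 * Illustrative output (hypothetical counts and addresses, not taken from the
 * upstream sources): the text produced by prof_dump_header_impl() and
 * prof_dump_gctx() above has roughly this shape:
 *
 *   heap_v2/524288
 *     t*: 1234: 5678901 [0: 0]
 *     t0: 1100: 5000000 [0: 0]
 *     t1: 134: 678901 [0: 0]
 *   @ 0x7f3a10021abc 0x7f3a10020f00 0x55d2c4e01234
 *     t*: 2: 98304 [0: 0]
 *     t0: 2: 98304 [0: 0]
 *
 * i.e. a global header, one "t<thr_uid>:" line per dumping thread, then each
 * backtrace ("@" plus frame addresses) followed by its per-thread counts.
 * The bracketed pair holds the accumulated counts, which stay zero unless
 * opt_prof_accum is enabled.
 */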
{ + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else + mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif + +static int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif +} + +static bool +prof_dump_maps(bool propagate_err) { + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented +#else + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps("/proc/%d/maps", pid); + } + } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } + nread = malloc_read_fd(mfd, + &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE + - prof_dump_buf_end); + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: + if (mfd != -1) { + close(mfd); + } + return ret; +} + +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ + if (cnt_all->curbytes != 0) { + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
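/*
 * Worked example (hypothetical figures, not from the upstream sources) of
 * the scaling performed by prof_leakcheck() above: with lg_prof_sample = 19
 * the sample period is 2^19 = 524288 bytes.  If the summed counters show
 * curobjs = 64 and curbytes = 1048576, then
 *   ratio        = (1048576 / 64) / 524288 = 0.03125
 *   scale_factor = 1 / (1 - e^-0.03125)   ~= 32.5
 * so the summary reports roughly 64 * 32.5 ~= 2080 leaked objects and
 * 1048576 * 32.5 ~= 34 MB leaked, extrapolated from only 64 sampled objects.
 */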
"s" : ""); + malloc_printf( + ": Run jeprof on \"%s\" for leak detail\n", + filename); + } +#endif +} + +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} + +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } + + /* Dump profile header. */ + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { + goto label_write_error; + } + + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { + goto label_write_error; + } + + /* Dump /proc//maps if possible. 
*/ + if (prof_dump_maps(propagate_err)) { + goto label_write_error; + } + + if (prof_dump_close(propagate_err)) { + return true; + } + + return false; +label_write_error: + prof_dump_close(propagate_err); + return true; +} + +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } +} +#endif + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) { + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); + } + prof_dump_seq++; +} + +static void +prof_fdump(void) { + tsd_t *tsd; + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); + + if (!prof_booted) { + return; + } + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} + +bool 
+prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; + } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; +} + +void +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return; + } + if (tdata->enq) { + tdata->enq_idump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[PATH_MAX + 1]; + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +bool +prof_mdump(tsd_t *tsd, const char *filename) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + if (!opt_prof || !prof_booted) { + return true; + } + char filename_buf[DUMP_FILENAME_BUFSIZE]; + if (filename == NULL) { + /* No filename specified, so automatically generate one. */ + if (opt_prof_prefix[0] == '\0') { + return true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + filename = filename_buf; + } + return prof_dump(tsd, true, filename, false); +} + +void +prof_gdump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return; + } + if (tdata->enq) { + tdata->enq_gdump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[DUMP_FILENAME_BUFSIZE]; + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +static void +prof_bt_hash(const void *key, size_t r_hash[2]) { + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +static bool +prof_bt_keycomp(const void *k1, const void *k2) { + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) { + return false; + } + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +static void +prof_bt_node_hash(const void *key, size_t r_hash[2]) { + const prof_bt_node_t *bt_node = (prof_bt_node_t *)key; + prof_bt_hash((void *)(&bt_node->bt), r_hash); +} + +static bool +prof_bt_node_keycomp(const void *k1, const void *k2) { + const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1; + const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2; + return prof_bt_keycomp((void *)(&bt_node1->bt), + (void *)(&bt_node2->bt)); +} + +static void +prof_thr_node_hash(const void *key, size_t r_hash[2]) { + const prof_thr_node_t *thr_node = (prof_thr_node_t *)key; + hash(&thr_node->thr_uid, sizeof(uint64_t), 
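/*
 * Illustrative filenames (hypothetical pid and sequence numbers, not from
 * the upstream sources) produced by prof_dump_filename() above, assuming
 * the default "jeprof" prefix:
 *
 *   jeprof.12345.0.f.heap    final dump at exit       (prof_fdump)
 *   jeprof.12345.3.i2.heap   interval-triggered dump  (prof_idump)
 *   jeprof.12345.7.m1.heap   manually requested dump  (prof_mdump)
 *   jeprof.12345.9.u4.heap   gdump-triggered dump     (prof_gdump)
 *
 * The pattern is <prefix>.<pid>.<seq>.<letter><per-kind seq>.heap, with the
 * per-kind sequence number omitted for the final ('f') dump.
 */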
0x94122f35U, r_hash); +} + +static bool +prof_thr_node_keycomp(const void *k1, const void *k2) { + const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1; + const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2; + return thr_node1->thr_uid == thr_node2->thr_uid; +} + +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { + uint64_t thr_uid; + + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); + + return thr_uid; +} + +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + prof_tdata_t *tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. */ + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; + } + + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); + + tdata_tree_remove(&tdatas, tdata); + + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); +} + +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} + +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. 
+ */ + if (!destroy_tdata) { + tdata->attached = false; + } + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); + } +} + +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; + bool active = tdata->active; + + prof_tdata_detach(tsd, tdata); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +void +prof_tdata_cleanup(tsd_t *tsd) { + prof_tdata_t *tdata; + + if (!config_prof) { + return; + } + + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) { + prof_tdata_detach(tsd, tdata); + } +} + +bool +prof_active_get(tsdn_t *tsdn) { + bool prof_active_current; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_current = prof_active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; +} + +bool +prof_active_set(tsdn_t *tsdn, bool active) { + bool prof_active_old; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_old = prof_active; + prof_active = active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_old; +} + +#ifdef JEMALLOC_JET +size_t +prof_log_bt_count(void) { + size_t cnt = 0; + prof_bt_node_t *node = log_bt_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_alloc_count(void) { + size_t cnt = 0; + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +size_t +prof_log_thr_count(void) { + size_t cnt = 0; + prof_thr_node_t *node = log_thr_first; + while (node != NULL) { + cnt++; + node = node->next; + } + return cnt; +} + +bool +prof_log_is_logging(void) { + return prof_logging_state == prof_logging_state_started; +} + +bool +prof_log_rep_check(void) { + if (prof_logging_state == prof_logging_state_stopped + && log_tables_initialized) { + return true; + } + + if (log_bt_last != NULL && log_bt_last->next != NULL) { + return true; + } + if (log_thr_last != NULL && 
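/*
 * Sketch under stated assumptions (these entry points are not shown in this
 * file): the control functions above are normally reached through the
 * mallctl namespace, e.g. "prof.dump" -> prof_mdump(), "prof.active" ->
 * prof_active_set(), and "prof.reset" -> prof_reset().  A caller resetting
 * the sample rate might look like:
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
reset_profile_sampling(void) {
    size_t lg_sample = 19; /* resample at ~1 sample per 512 KiB */
    /* Assumes the "prof.reset" mallctl takes a size_t lg_sample as newp. */
    mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));
}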
log_thr_last->next != NULL) { + return true; + } + if (log_alloc_last != NULL && log_alloc_last->next != NULL) { + return true; + } + + size_t bt_count = prof_log_bt_count(); + size_t thr_count = prof_log_thr_count(); + size_t alloc_count = prof_log_alloc_count(); + + + if (prof_logging_state == prof_logging_state_stopped) { + if (bt_count != 0 || thr_count != 0 || alloc_count || 0) { + return true; + } + } + + prof_alloc_node_t *node = log_alloc_first; + while (node != NULL) { + if (node->alloc_bt_ind >= bt_count) { + return true; + } + if (node->free_bt_ind >= bt_count) { + return true; + } + if (node->alloc_thr_ind >= thr_count) { + return true; + } + if (node->free_thr_ind >= thr_count) { + return true; + } + if (node->alloc_time_ns > node->free_time_ns) { + return true; + } + node = node->next; + } + + return false; +} + +void +prof_log_dummy_set(bool new_value) { + prof_log_dummy = new_value; +} +#endif + +bool +prof_log_start(tsdn_t *tsdn, const char *filename) { + if (!opt_prof || !prof_booted) { + return true; + } + + bool ret = false; + size_t buf_size = PATH_MAX + 1; + + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_stopped) { + ret = true; + } else if (filename == NULL) { + /* Make default name. */ + malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json", + opt_prof_prefix, prof_getpid(), log_seq); + log_seq++; + prof_logging_state = prof_logging_state_started; + } else if (strlen(filename) >= buf_size) { + ret = true; + } else { + strcpy(log_filename, filename); + prof_logging_state = prof_logging_state_started; + } + + if (!ret) { + nstime_update(&log_start_timestamp); + } + + malloc_mutex_unlock(tsdn, &log_mtx); + + return ret; +} + +/* Used as an atexit function to stop logging on exit. */ +static void +prof_log_stop_final(void) { + tsd_t *tsd = tsd_fetch(); + prof_log_stop(tsd_tsdn(tsd)); +} + +struct prof_emitter_cb_arg_s { + int fd; + ssize_t ret; +}; + +static void +prof_emitter_write_cb(void *opaque, const char *to_write) { + struct prof_emitter_cb_arg_s *arg = + (struct prof_emitter_cb_arg_s *)opaque; + size_t bytes = strlen(to_write); +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return; + } +#endif + arg->ret = write(arg->fd, (void *)to_write, bytes); +} + +/* + * prof_log_emit_{...} goes through the appropriate linked list, emitting each + * node to the json and deallocating it. + */ +static void +prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "threads"); + prof_thr_node_t *thr_node = log_thr_first; + prof_thr_node_t *thr_old_node; + while (thr_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "thr_uid", emitter_type_uint64, + &thr_node->thr_uid); + + char *thr_name = thr_node->name; + + emitter_json_kv(emitter, "thr_name", emitter_type_string, + &thr_name); + + emitter_json_object_end(emitter); + thr_old_node = thr_node; + thr_node = thr_node->next; + idalloc(tsd, thr_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "stack_traces"); + prof_bt_node_t *bt_node = log_bt_first; + prof_bt_node_t *bt_old_node; + /* + * Calculate how many hex digits we need: twice number of bytes, two for + * "0x", and then one more for terminating '\0'. 
+ */ + char buf[2 * sizeof(intptr_t) + 3]; + size_t buf_sz = sizeof(buf); + while (bt_node != NULL) { + emitter_json_array_begin(emitter); + size_t i; + for (i = 0; i < bt_node->bt.len; i++) { + malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]); + char *trace_str = buf; + emitter_json_value(emitter, emitter_type_string, + &trace_str); + } + emitter_json_array_end(emitter); + + bt_old_node = bt_node; + bt_node = bt_node->next; + idalloc(tsd, bt_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) { + emitter_json_array_kv_begin(emitter, "allocations"); + prof_alloc_node_t *alloc_node = log_alloc_first; + prof_alloc_node_t *alloc_old_node; + while (alloc_node != NULL) { + emitter_json_object_begin(emitter); + + emitter_json_kv(emitter, "alloc_thread", emitter_type_size, + &alloc_node->alloc_thr_ind); + + emitter_json_kv(emitter, "free_thread", emitter_type_size, + &alloc_node->free_thr_ind); + + emitter_json_kv(emitter, "alloc_trace", emitter_type_size, + &alloc_node->alloc_bt_ind); + + emitter_json_kv(emitter, "free_trace", emitter_type_size, + &alloc_node->free_bt_ind); + + emitter_json_kv(emitter, "alloc_timestamp", + emitter_type_uint64, &alloc_node->alloc_time_ns); + + emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64, + &alloc_node->free_time_ns); + + emitter_json_kv(emitter, "usize", emitter_type_uint64, + &alloc_node->usize); + + emitter_json_object_end(emitter); + + alloc_old_node = alloc_node; + alloc_node = alloc_node->next; + idalloc(tsd, alloc_old_node); + } + emitter_json_array_end(emitter); +} + +static void +prof_log_emit_metadata(emitter_t *emitter) { + emitter_json_object_kv_begin(emitter, "info"); + + nstime_t now = NSTIME_ZERO_INITIALIZER; + + nstime_update(&now); + uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp); + emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns); + + char *vers = JEMALLOC_VERSION; + emitter_json_kv(emitter, "version", + emitter_type_string, &vers); + + emitter_json_kv(emitter, "lg_sample_rate", + emitter_type_int, &lg_prof_sample); + + int pid = prof_getpid(); + emitter_json_kv(emitter, "pid", emitter_type_int, &pid); + + emitter_json_object_end(emitter); +} + + +bool +prof_log_stop(tsdn_t *tsdn) { + if (!opt_prof || !prof_booted) { + return true; + } + + tsd_t *tsd = tsdn_tsd(tsdn); + malloc_mutex_lock(tsdn, &log_mtx); + + if (prof_logging_state != prof_logging_state_started) { + malloc_mutex_unlock(tsdn, &log_mtx); + return true; + } + + /* + * Set the state to dumping. We'll set it to stopped when we're done. + * Since other threads won't be able to start/stop/log when the state is + * dumping, we don't have to hold the lock during the whole method. + */ + prof_logging_state = prof_logging_state_dumping; + malloc_mutex_unlock(tsdn, &log_mtx); + + + emitter_t emitter; + + /* Create a file. */ + + int fd; +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + fd = 0; + } else { + fd = creat(log_filename, 0644); + } +#else + fd = creat(log_filename, 0644); +#endif + + if (fd == -1) { + malloc_printf(": creat() for log file \"%s\" " + " failed with %d\n", log_filename, errno); + if (opt_abort) { + abort(); + } + return true; + } + + /* Emit to json. 
*/ + struct prof_emitter_cb_arg_s arg; + arg.fd = fd; + emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb, + (void *)(&arg)); + + emitter_begin(&emitter); + prof_log_emit_metadata(&emitter); + prof_log_emit_threads(tsd, &emitter); + prof_log_emit_traces(tsd, &emitter); + prof_log_emit_allocs(tsd, &emitter); + emitter_end(&emitter); + + /* Reset global state. */ + if (log_tables_initialized) { + ckh_delete(tsd, &log_bt_node_set); + ckh_delete(tsd, &log_thr_node_set); + } + log_tables_initialized = false; + log_bt_index = 0; + log_thr_index = 0; + log_bt_first = NULL; + log_bt_last = NULL; + log_thr_first = NULL; + log_thr_last = NULL; + log_alloc_first = NULL; + log_alloc_last = NULL; + + malloc_mutex_lock(tsdn, &log_mtx); + prof_logging_state = prof_logging_state_stopped; + malloc_mutex_unlock(tsdn, &log_mtx); + +#ifdef JEMALLOC_JET + if (prof_log_dummy) { + return false; + } +#endif + return close(fd); +} + +const char * +prof_thread_name_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return ""; + } + return (tdata->thread_name != NULL ? tdata->thread_name : ""); +} + +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { + char *ret; + size_t size; + + if (thread_name == NULL) { + return NULL; + } + + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; + } + + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } + memcpy(ret, thread_name, size); + return ret; +} + +int +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return EAGAIN; + } + + /* Validate input. 
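/*
 * Illustrative log contents (hypothetical values, not from the upstream
 * sources): the emitter calls above produce a JSON document of roughly this
 * shape when prof_log_stop() runs:
 *
 *   {
 *     "info": {"duration": 9507222000, "version": "...",
 *              "lg_sample_rate": 19, "pid": 12345},
 *     "threads": [{"thr_uid": 0, "thr_name": "main"}],
 *     "stack_traces": [["0x7f3a10021abc", "0x55d2c4e01234"]],
 *     "allocations": [{"alloc_thread": 0, "free_thread": 0,
 *                      "alloc_trace": 0, "free_trace": 0,
 *                      "alloc_timestamp": 123, "free_timestamp": 456,
 *                      "usize": 4096}]
 *   }
 *
 * where the *_thread and *_trace fields are indices into the "threads" and
 * "stack_traces" arrays, as checked by prof_log_rep_check() above.
 */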
*/ + if (thread_name == NULL) { + return EFAULT; + } + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } + } + + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) { + return EAGAIN; + } + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) { + tdata->thread_name = s; + } + return 0; +} + +bool +prof_thread_active_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return false; + } + return tdata->active; +} + +bool +prof_thread_active_set(tsd_t *tsd, bool active) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + tdata->active = active; + return false; +} + +bool +prof_thread_active_init_get(tsdn_t *tsdn) { + bool active_init; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init = prof_thread_active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init; +} + +bool +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { + bool active_init_old; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init_old = prof_thread_active_init; + prof_thread_active_init = active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init_old; +} + +bool +prof_gdump_get(tsdn_t *tsdn) { + bool prof_gdump_current; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_current = prof_gdump_val; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_current; +} + +bool +prof_gdump_set(tsdn_t *tsdn, bool gdump) { + bool prof_gdump_old; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_old = prof_gdump_val; + prof_gdump_val = gdump; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_old; +} + +void +prof_boot0(void) { + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void +prof_boot1(void) { + cassert(config_prof); + + /* + * opt_prof must be in its final state before any arenas are + * initialized, so this function must be executed early. + */ + + if (opt_prof_leak && !opt_prof) { + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. 
+ */ + opt_prof = true; + opt_prof_gdump = false; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } + } +} + +bool +prof_boot2(tsd_t *tsd) { + cassert(config_prof); + + if (opt_prof) { + unsigned i; + + lg_prof_sample = opt_lg_prof_sample; + + prof_active = opt_prof_active; + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_gdump_val = opt_prof_gdump; + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_thread_active_init = opt_prof_thread_active_init; + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } + + tdata_tree_new(&tdatas); + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; + } + + next_thr_uid = 0; + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } + + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } + + if (opt_prof_final && opt_prof_prefix[0] != '\0' && + atexit(prof_fdump) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) { + abort(); + } + } + + if (opt_prof_log) { + prof_log_start(tsd_tsdn(tsd), NULL); + } + + if (atexit(prof_log_stop_final) != 0) { + malloc_write(": Error in atexit() " + "for logging\n"); + if (opt_abort) { + abort(); + } + } + + if (malloc_mutex_init(&log_mtx, "prof_log", + WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS, + prof_bt_node_hash, prof_bt_node_keycomp)) { + return true; + } + + if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS, + prof_thr_node_hash, prof_thr_node_keycomp)) { + return true; + } + + log_tables_initialized = true; + + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (gctx_locks == NULL) { + return true; + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { + return true; + } + } + + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (tdata_locks == NULL) { + return true; + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { + return true; + } + } +#ifdef JEMALLOC_PROF_LIBGCC + /* + * Cause the backtracing machinery to allocate its internal + * state before enabling profiling. 
+ */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif + } + prof_booted = true; + + return false; +} + +void +prof_prefork0(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + unsigned i; + + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } + } +} + +void +prof_prefork1(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} + +void +prof_postfork_parent(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + unsigned i; + + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); + } +} + +void +prof_postfork_child(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + unsigned i; + + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); + } +} + +/******************************************************************************/ diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c new file mode 100644 index 0000000..4ae41fe --- /dev/null +++ b/deps/jemalloc/src/rtree.c @@ -0,0 +1,320 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" + +/* + * Only the most significant bits of keys passed to rtree_{read,write}() are + * used. + */ +bool +rtree_new(rtree_t *rtree, bool zeroed) { +#ifdef JEMALLOC_JET + if (!zeroed) { + memset(rtree, 0, sizeof(rtree_t)); /* Clear root. 
*/ + } +#else + assert(zeroed); +#endif + + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, + malloc_mutex_rank_exclusive)) { + return true; + } + + return false; +} + +static rtree_node_elm_t * +rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_node_elm_t), CACHELINE); +} +rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; + +static void +rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = + rtree_node_dalloc_impl; + +static rtree_leaf_elm_t * +rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_leaf_elm_t), CACHELINE); +} +rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; + +static void +rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { + /* Leaves are never deleted during normal operation. */ + not_reached(); +} +rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = + rtree_leaf_dalloc_impl; + +#ifdef JEMALLOC_JET +# if RTREE_HEIGHT > 1 +static void +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, + unsigned level) { + size_t nchildren = ZU(1) << rtree_levels[level].bits; + if (level + 2 < RTREE_HEIGHT) { + for (size_t i = 0; i < nchildren; i++) { + rtree_node_elm_t *node = + (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (node != NULL) { + rtree_delete_subtree(tsdn, rtree, node, level + + 1); + } + } + } else { + for (size_t i = 0; i < nchildren; i++) { + rtree_leaf_elm_t *leaf = + (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (leaf != NULL) { + rtree_leaf_dalloc(tsdn, rtree, leaf); + } + } + } + + if (subtree != rtree->root) { + rtree_node_dalloc(tsdn, rtree, subtree); + } +} +# endif + +void +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { +# if RTREE_HEIGHT > 1 + rtree_delete_subtree(tsdn, rtree, rtree->root, 0); +# endif +} +#endif + +static rtree_node_elm_t * +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); + if (node == NULL) { + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree_levels[level].bits); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, node, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return node; +} + +static rtree_leaf_elm_t * +rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. 
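/*
 * Minimal sketch (not part of the upstream sources) of the publication
 * pattern used by rtree_node_init()/rtree_leaf_init() above, restated with
 * plain C11 atomics: the writer initializes the object under the init lock
 * and then stores the pointer with release semantics; readers that did not
 * take the lock must load it with acquire semantics before dereferencing.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct payload { int ready; };

static _Atomic(struct payload *) slot;

static void
publish(void) {
    struct payload *p = malloc(sizeof(*p));
    if (p == NULL) {
        return;
    }
    p->ready = 1;
    /* Release: the initialization above happens-before any acquire load. */
    atomic_store_explicit(&slot, p, memory_order_release);
}

static struct payload *
consume(void) {
    /* Acquire pairs with the release store in publish(). */
    return atomic_load_explicit(&slot, memory_order_acquire);
}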
+ */ + rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); + if (leaf == NULL) { + leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << + rtree_levels[RTREE_HEIGHT-1].bits); + if (leaf == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, leaf, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return leaf; +} + +static bool +rtree_node_valid(rtree_node_elm_t *node) { + return ((uintptr_t)node != (uintptr_t)0); +} + +static bool +rtree_leaf_valid(rtree_leaf_elm_t *leaf) { + return ((uintptr_t)leaf != (uintptr_t)0); +} + +static rtree_node_elm_t * +rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_node_elm_t *node; + + if (dependent) { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || node != NULL); + return node; +} + +static rtree_node_elm_t * +rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_node_elm_t *node; + + node = rtree_child_node_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(node))) { + node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); + } + assert(!dependent || node != NULL); + return node; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_leaf_elm_t *leaf; + + if (dependent) { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || leaf != NULL); + return leaf; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_leaf_elm_t *leaf; + + leaf = rtree_child_leaf_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { + leaf = rtree_leaf_init(tsdn, rtree, &elm->child); + } + assert(!dependent || leaf != NULL); + return leaf; +} + +rtree_leaf_elm_t * +rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + rtree_node_elm_t *node; + rtree_leaf_elm_t *leaf; +#if RTREE_HEIGHT > 1 + node = rtree->root; +#else + leaf = rtree->root; +#endif + + if (config_debug) { + uintptr_t leafkey = rtree_leafkey(key); + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + assert(rtree_ctx->cache[i].leafkey != leafkey); + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + assert(rtree_ctx->l2_cache[i].leafkey != leafkey); + } + } + +#define RTREE_GET_CHILD(level) { \ + assert(level < RTREE_HEIGHT-1); \ + if (level != 0 && !dependent && \ + unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing ? \ + rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_node_tryread(&node[subkey], \ + dependent); \ + } else { \ + leaf = init_missing ? \ + rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_leaf_tryread(&node[subkey], \ + dependent); \ + } \ + } + /* + * Cache replacement upon hard lookup (i.e. 
L1 & L2 rtree cache miss): + * (1) evict last entry in L2 cache; (2) move the collision slot from L1 + * cache down to L2; and 3) fill L1. + */ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) * \ + (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ + } + if (RTREE_HEIGHT > 1) { + RTREE_GET_CHILD(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_CHILD(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_CHILD(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) +#undef RTREE_GET_CHILD +#undef RTREE_GET_LEAF + not_reached(); +} + +void +rtree_ctx_data_init(rtree_ctx_t *ctx) { + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } +} diff --git a/deps/jemalloc/src/safety_check.c b/deps/jemalloc/src/safety_check.c new file mode 100644 index 0000000..804155d --- /dev/null +++ b/deps/jemalloc/src/safety_check.c @@ -0,0 +1,24 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +static void (*safety_check_abort)(const char *message); + +void safety_check_set_abort(void (*abort_fn)(const char *)) { + safety_check_abort = abort_fn; +} + +void safety_check_fail(const char *format, ...) { + char buf[MALLOC_PRINTF_BUFSIZE]; + + va_list ap; + va_start(ap, format); + malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap); + va_end(ap); + + if (safety_check_abort == NULL) { + malloc_write(buf); + abort(); + } else { + safety_check_abort(buf); + } +} diff --git a/deps/jemalloc/src/sc.c b/deps/jemalloc/src/sc.c new file mode 100644 index 0000000..89ddb6b --- /dev/null +++ b/deps/jemalloc/src/sc.c @@ -0,0 +1,313 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/sc.h" + +/* + * This module computes the size classes used to satisfy allocations. The logic + * here was ported more or less line-by-line from a shell script, and because of + * that is not the most idiomatic C. Eventually we should fix this, but for now + * at least the damage is compartmentalized to this file. + */ + +sc_data_t sc_data_global; + +static size_t +reg_size_compute(int lg_base, int lg_delta, int ndelta) { + return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); +} + +/* Returns the number of pages in the slab. 
*/ +static int +slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) { + size_t page = (ZU(1) << lg_page); + size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta); + + size_t try_slab_size = page; + size_t try_nregs = try_slab_size / reg_size; + size_t perfect_slab_size = 0; + bool perfect = false; + /* + * This loop continues until we find the least common multiple of the + * page size and size class size. Size classes are all of the form + * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is + * (ndelta + ngroup) * delta. The way we choose slabbing strategies + * means that delta is at most the page size and ndelta < ngroup. So + * the loop executes for at most 2 * ngroup - 1 iterations, which is + * also the bound on the number of pages in a slab chosen by default. + * With the current default settings, this is at most 7. + */ + while (!perfect) { + perfect_slab_size = try_slab_size; + size_t perfect_nregs = try_nregs; + try_slab_size += page; + try_nregs = try_slab_size / reg_size; + if (perfect_slab_size == perfect_nregs * reg_size) { + perfect = true; + } + } + return (int)(perfect_slab_size / page); +} + +static void +size_class( + /* Output. */ + sc_t *sc, + /* Configuration decisions. */ + int lg_max_lookup, int lg_page, int lg_ngroup, + /* Inputs specific to the size class. */ + int index, int lg_base, int lg_delta, int ndelta) { + sc->index = index; + sc->lg_base = lg_base; + sc->lg_delta = lg_delta; + sc->ndelta = ndelta; + sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta) + % (ZU(1) << lg_page) == 0); + size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta); + if (index == 0) { + assert(!sc->psz); + } + if (size < (ZU(1) << (lg_page + lg_ngroup))) { + sc->bin = true; + sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta); + } else { + sc->bin = false; + sc->pgs = 0; + } + if (size <= (ZU(1) << lg_max_lookup)) { + sc->lg_delta_lookup = lg_delta; + } else { + sc->lg_delta_lookup = 0; + } +} + +static void +size_classes( + /* Output. */ + sc_data_t *sc_data, + /* Determined by the system. */ + size_t lg_ptr_size, int lg_quantum, + /* Configuration decisions. */ + int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) { + int ptr_bits = (1 << lg_ptr_size) * 8; + int ngroup = (1 << lg_ngroup); + int ntiny = 0; + int nlbins = 0; + int lg_tiny_maxclass = (unsigned)-1; + int nbins = 0; + int npsizes = 0; + + int index = 0; + + int ndelta = 0; + int lg_base = lg_tiny_min; + int lg_delta = lg_base; + + /* Outputs that we update as we go. */ + size_t lookup_maxclass = 0; + size_t small_maxclass = 0; + int lg_large_minclass = 0; + size_t large_maxclass = 0; + + /* Tiny size classes. */ + while (lg_base < lg_quantum) { + sc_t *sc = &sc_data->sc[index]; + size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, + lg_base, lg_delta, ndelta); + if (sc->lg_delta_lookup != 0) { + nlbins = index + 1; + } + if (sc->psz) { + npsizes++; + } + if (sc->bin) { + nbins++; + } + ntiny++; + /* Final written value is correct. */ + lg_tiny_maxclass = lg_base; + index++; + lg_delta = lg_base; + lg_base++; + } + + /* First non-tiny (pseudo) group. */ + if (ntiny != 0) { + sc_t *sc = &sc_data->sc[index]; + /* + * See the note in sc.h; the first non-tiny size class has an + * unusual encoding. 
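To make the least-common-multiple search in slab_size() above concrete: with a 4 KiB page (lg_page = 12) and the 1280-byte class (lg_base = 10, lg_delta = 8, ndelta = 1), the loop considers candidate slab sizes 4096, 8192, 12288, 16384 and 20480 bytes; 20480 is the first candidate that is an exact multiple of 1280 (16 regions), so slab_size() returns 5 pages. The 1280-byte class and 4 KiB page are one plausible default configuration, used here only for illustration.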
+ */ + lg_base--; + ndelta = 1; + size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, + lg_base, lg_delta, ndelta); + index++; + lg_base++; + lg_delta++; + if (sc->psz) { + npsizes++; + } + if (sc->bin) { + nbins++; + } + } + while (ndelta < ngroup) { + sc_t *sc = &sc_data->sc[index]; + size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, + lg_base, lg_delta, ndelta); + index++; + ndelta++; + if (sc->psz) { + npsizes++; + } + if (sc->bin) { + nbins++; + } + } + + /* All remaining groups. */ + lg_base = lg_base + lg_ngroup; + while (lg_base < ptr_bits - 1) { + ndelta = 1; + int ndelta_limit; + if (lg_base == ptr_bits - 2) { + ndelta_limit = ngroup - 1; + } else { + ndelta_limit = ngroup; + } + while (ndelta <= ndelta_limit) { + sc_t *sc = &sc_data->sc[index]; + size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index, + lg_base, lg_delta, ndelta); + if (sc->lg_delta_lookup != 0) { + nlbins = index + 1; + /* Final written value is correct. */ + lookup_maxclass = (ZU(1) << lg_base) + + (ZU(ndelta) << lg_delta); + } + if (sc->psz) { + npsizes++; + } + if (sc->bin) { + nbins++; + /* Final written value is correct. */ + small_maxclass = (ZU(1) << lg_base) + + (ZU(ndelta) << lg_delta); + if (lg_ngroup > 0) { + lg_large_minclass = lg_base + 1; + } else { + lg_large_minclass = lg_base + 2; + } + } + large_maxclass = (ZU(1) << lg_base) + + (ZU(ndelta) << lg_delta); + index++; + ndelta++; + } + lg_base++; + lg_delta++; + } + /* Additional outputs. */ + int nsizes = index; + unsigned lg_ceil_nsizes = lg_ceil(nsizes); + + /* Fill in the output data. */ + sc_data->ntiny = ntiny; + sc_data->nlbins = nlbins; + sc_data->nbins = nbins; + sc_data->nsizes = nsizes; + sc_data->lg_ceil_nsizes = lg_ceil_nsizes; + sc_data->npsizes = npsizes; + sc_data->lg_tiny_maxclass = lg_tiny_maxclass; + sc_data->lookup_maxclass = lookup_maxclass; + sc_data->small_maxclass = small_maxclass; + sc_data->lg_large_minclass = lg_large_minclass; + sc_data->large_minclass = (ZU(1) << lg_large_minclass); + sc_data->large_maxclass = large_maxclass; + + /* + * We compute these values in two ways: + * - Incrementally, as above. + * - In macros, in sc.h. + * The computation is easier when done incrementally, but putting it in + * a constant makes it available to the fast paths without having to + * touch the extra global cacheline. We assert, however, that the two + * computations are equivalent. + */ + assert(sc_data->npsizes == SC_NPSIZES); + assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS); + assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS); + assert(sc_data->large_minclass == SC_LARGE_MINCLASS); + assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS); + assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS); + + /* + * In the allocation fastpath, we want to assume that we can + * unconditionally subtract the requested allocation size from + * a ssize_t, and detect passing through 0 correctly. This + * results in optimal generated code. For this to work, the + * maximum allocation size must be less than SSIZE_MAX. 
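A short standalone sketch of the group structure produced by the "remaining groups" loop above, assuming the default lg_ngroup of 2 (four classes per doubling); the concrete group shown is illustrative, not pulled from the generated tables.

#include <stdio.h>
#include <stddef.h>

int
main(void) {
	/* The group based at 4096 (lg_base = 12) with lg_delta = 10:
	 * prints 5120, 6144, 7168 and 8192. */
	for (int ndelta = 1; ndelta <= 4; ndelta++) {
		printf("%zu\n", ((size_t)1 << 12) + ((size_t)ndelta << 10));
	}
	return 0;
}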
+ */ + assert(SC_LARGE_MAXCLASS < SSIZE_MAX); +} + +void +sc_data_init(sc_data_t *sc_data) { + assert(!sc_data->initialized); + + int lg_max_lookup = 12; + + size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN, + lg_max_lookup, LG_PAGE, 2); + + sc_data->initialized = true; +} + +static void +sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) { + size_t min_pgs = reg_size / PAGE; + if (reg_size % PAGE != 0) { + min_pgs++; + } + /* + * BITMAP_MAXBITS is actually determined by putting the smallest + * possible size-class on one page, so this can never be 0. + */ + size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE; + + assert(min_pgs <= max_pgs); + assert(min_pgs > 0); + assert(max_pgs >= 1); + if (pgs_guess < min_pgs) { + sc->pgs = (int)min_pgs; + } else if (pgs_guess > max_pgs) { + sc->pgs = (int)max_pgs; + } else { + sc->pgs = (int)pgs_guess; + } +} + +void +sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) { + assert(data->initialized); + for (int i = 0; i < data->nsizes; i++) { + sc_t *sc = &data->sc[i]; + if (!sc->bin) { + break; + } + size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta, + sc->ndelta); + if (begin <= reg_size && reg_size <= end) { + sc_data_update_sc_slab_size(sc, reg_size, pgs); + } + } +} + +void +sc_boot(sc_data_t *data) { + sc_data_init(data); +} diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c new file mode 100644 index 0000000..118e05d --- /dev/null +++ b/deps/jemalloc/src/stats.c @@ -0,0 +1,1457 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" + +const char *global_mutex_names[mutex_prof_num_global_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP +}; + +const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + +/******************************************************************************/ +/* Data. */ + +bool opt_stats_print = false; +char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; + +/******************************************************************************/ + +static uint64_t +rate_per_second(uint64_t value, uint64_t uptime_ns) { + uint64_t billion = 1000000000; + if (uptime_ns == 0 || value == 0) { + return 0; + } + if (uptime_ns < billion) { + return value; + } else { + uint64_t uptime_s = uptime_ns / billion; + return value / uptime_s; + } +} + +/* Calculate x.yyy and output a string (takes a fixed sized char array). 
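The CTL_GET()/CTL_M2_GET()/CTL_M2_M4_GET() macros above wrap the internal xmallctl*() helpers. The same name-to-MIB pattern is available through jemalloc's public API; a minimal sketch follows (the header path and the unprefixed mallctl* names assume a default, unprefixed build).

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned nbins;
	size_t sz = sizeof(nbins);
	if (mallctl("arenas.nbins", &nbins, &sz, NULL, 0) != 0) {
		return 1;
	}

	/* Translate the name to a MIB once, then patch in the per-bin index
	 * before each by-MIB read, exactly as CTL_M2_GET() does with mib[2]. */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0) {
		return 1;
	}
	for (unsigned j = 0; j < nbins; j++) {
		size_t bin_size;
		sz = sizeof(bin_size);
		mib[2] = j;
		if (mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0) == 0) {
			printf("bin %u: %zu bytes\n", j, bin_size);
		}
	}
	return 0;
}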
*/ +static bool +get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { + if (divisor == 0 || dividend > divisor) { + /* The rate is not supposed to be greater than 1. */ + return true; + } + if (dividend > 0) { + assert(UINT64_MAX / dividend >= 1000); + } + + unsigned n = (unsigned)((dividend * 1000) / divisor); + if (n < 10) { + malloc_snprintf(str, 6, "0.00%u", n); + } else if (n < 100) { + malloc_snprintf(str, 6, "0.0%u", n); + } else if (n < 1000) { + malloc_snprintf(str, 6, "0.%u", n); + } else { + malloc_snprintf(str, 6, "1"); + } + + return false; +} + +#define MUTEX_CTL_STR_MAX_LENGTH 128 +static void +gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, + const char *mutex, const char *counter) { + malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); +} + +static void +mutex_stats_init_cols(emitter_row_t *row, const char *table_name, + emitter_col_t *name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + + if (name != NULL) { + emitter_col_init(name, row); + name->justify = emitter_justify_left; + name->width = 21; + name->type = emitter_type_title; + name->str_val = table_name; + } + +#define WIDTH_uint32_t 12 +#define WIDTH_uint64_t 16 +#define OP(counter, counter_type, human, derived, base_counter) \ + col = &col_##counter_type[k_##counter_type]; \ + ++k_##counter_type; \ + emitter_col_init(col, row); \ + col->justify = emitter_justify_right; \ + col->width = derived ? 8 : WIDTH_##counter_type; \ + col->type = emitter_type_title; \ + col->str_val = human; + MUTEX_PROF_COUNTERS +#undef OP +#undef WIDTH_uint32_t +#undef WIDTH_uint64_t + col_uint64_t[mutex_counter_total_wait_time_ps].width = 10; +} + +static void +mutex_stats_read_global(const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], + uint64_t uptime) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "mutexes", name, #counter); \ + CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + } + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, + const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], + uint64_t uptime) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = 
EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ + CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + } + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters], + uint64_t uptime) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + emitter_col_t *dst; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human, derived, base_counter) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + if (!derived) { \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.bins.0","mutex", #counter); \ + CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ + (counter_type *)&dst->bool_val, counter_type); \ + } else { \ + emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \ + dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \ + } + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +/* "row" can be NULL to avoid emitting in table mode. */ +static void +mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + if (row != NULL) { + emitter_table_row(emitter, row); + } + + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, type, human, derived, base_counter) \ + if (!derived) { \ + col = &col_##type[k_##type]; \ + ++k_##type; \ + emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ + (const void *)&col->bool_val); \ + } + MUTEX_PROF_COUNTERS; +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +#define COL(row_name, column_name, left_or_right, col_width, etype) \ + emitter_col_t col_##column_name; \ + emitter_col_init(&col_##column_name, &row_name); \ + col_##column_name.justify = emitter_justify_##left_or_right; \ + col_##column_name.width = col_width; \ + col_##column_name.type = emitter_type_##etype; + +#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \ + COL(row_name, column_name, left_or_right, col_width, etype) \ + emitter_col_t header_##column_name; \ + emitter_col_init(&header_##column_name, &header_##row_name); \ + header_##column_name.justify = emitter_justify_##left_or_right; \ + header_##column_name.width = col_width; \ + header_##column_name.type = emitter_type_title; \ + header_##column_name.str_val = human ? 
human : #column_name; + + +static void +stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) { + size_t page; + bool in_gap, in_gap_prev; + unsigned nbins, j; + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("arenas.nbins", &nbins, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + + emitter_row_t row; + emitter_row_init(&row); + + COL_HDR(row, size, NULL, right, 20, size) + COL_HDR(row, ind, NULL, right, 4, unsigned) + COL_HDR(row, allocated, NULL, right, 13, uint64) + COL_HDR(row, nmalloc, NULL, right, 13, uint64) + COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, ndalloc, NULL, right, 13, uint64) + COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, nrequests, NULL, right, 13, uint64) + COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64) + COL_HDR(row, nshards, NULL, right, 9, unsigned) + COL_HDR(row, curregs, NULL, right, 13, size) + COL_HDR(row, curslabs, NULL, right, 13, size) + COL_HDR(row, nonfull_slabs, NULL, right, 15, size) + COL_HDR(row, regs, NULL, right, 5, unsigned) + COL_HDR(row, pgs, NULL, right, 4, size) + /* To buffer a right- and left-justified column. */ + COL_HDR(row, justify_spacer, NULL, right, 1, title) + COL_HDR(row, util, NULL, right, 6, title) + COL_HDR(row, nfills, NULL, right, 13, uint64) + COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, nflushes, NULL, right, 13, uint64) + COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, nslabs, NULL, right, 13, uint64) + COL_HDR(row, nreslabs, NULL, right, 13, uint64) + COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64) + + /* Don't want to actually print the name. */ + header_justify_spacer.str_val = " "; + col_justify_spacer.str_val = " "; + + emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters]; + + emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; + + if (mutex) { + mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, + col_mutex32); + mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, + header_mutex32); + } + + /* + * We print a "bins:" header as part of the table row; we need to adjust + * the header size column to compensate. 
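The OP()/MUTEX_PROF_COUNTERS usage earlier and the COL()/COL_HDR() definitions above all rely on the X-macro idiom: one list macro is expanded several times with different per-entry definitions of OP(). A generic, self-contained sketch of the technique; the counter names below are hypothetical and not taken from mutex_prof.h.

#include <stdio.h>

/* The list macro: each entry supplies (name, column_width). */
#define EXAMPLE_COUNTERS \
    OP(num_ops,  16)     \
    OP(num_wait, 16)     \
    OP(max_thds, 12)

/* Expansion 1: an enum of column indices. */
enum {
#define OP(name, width) col_##name,
	EXAMPLE_COUNTERS
#undef OP
	col_count
};

/* Expansion 2: the matching header strings. */
static const char *col_names[col_count] = {
#define OP(name, width) #name,
	EXAMPLE_COUNTERS
#undef OP
};

int
main(void) {
	for (int i = 0; i < col_count; i++) {
		printf("%s\n", col_names[i]);
	}
	return 0;
}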
+ */ + header_size.width -=5; + emitter_table_printf(emitter, "bins:"); + emitter_table_row(emitter, &header_row); + emitter_json_array_kv_begin(emitter, "bins"); + + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nslabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + size_t nonfull_slabs; + uint32_t nregs, nshards; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreslabs; + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, + uint64_t); + in_gap_prev = in_gap; + in_gap = (nslabs == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); + CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs, + size_t); + + if (mutex) { + mutex_stats_read_arena_bin(i, j, col_mutex64, + col_mutex32, uptime); + } + + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, + &nmalloc); + emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, + &ndalloc); + emitter_json_kv(emitter, "curregs", emitter_type_size, + &curregs); + emitter_json_kv(emitter, "nrequests", emitter_type_uint64, + &nrequests); + emitter_json_kv(emitter, "nfills", emitter_type_uint64, + &nfills); + emitter_json_kv(emitter, "nflushes", emitter_type_uint64, + &nflushes); + emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, + &nreslabs); + emitter_json_kv(emitter, "curslabs", emitter_type_size, + &curslabs); + emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size, + &nonfull_slabs); + if (mutex) { + emitter_json_object_kv_begin(emitter, "mutex"); + mutex_stats_emit(emitter, NULL, col_mutex64, + col_mutex32); + emitter_json_object_end(emitter); + } + emitter_json_object_end(emitter); + + size_t availregs = nregs * curslabs; + char util[6]; + if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) + { + if (availregs == 0) { + malloc_snprintf(util, sizeof(util), "1"); + } else if (curregs > availregs) { + /* + * Race detected: the counters were read in + * separate mallctl calls and concurrent + * operations happened in between. In this case + * no meaningful utilization can be computed. 
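To make the utilization column concrete (the numbers are illustrative only): with nregs = 512, curslabs = 1 and curregs = 300, availregs is 512 and get_rate_str() computes (300 * 1000) / 512 = 585, so the table shows "0.585"; curregs equal to availregs prints "1", and a curregs greater than availregs reading falls into the race case described above.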
+ */ + malloc_snprintf(util, sizeof(util), " race"); + } else { + not_reached(); + } + } + + col_size.size_val = reg_size; + col_ind.unsigned_val = j; + col_allocated.size_val = curregs * reg_size; + col_nmalloc.uint64_val = nmalloc; + col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime); + col_ndalloc.uint64_val = ndalloc; + col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); + col_nrequests.uint64_val = nrequests; + col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + col_nshards.unsigned_val = nshards; + col_curregs.size_val = curregs; + col_curslabs.size_val = curslabs; + col_nonfull_slabs.size_val = nonfull_slabs; + col_regs.unsigned_val = nregs; + col_pgs.size_val = slab_size / page; + col_util.str_val = util; + col_nfills.uint64_val = nfills; + col_nfills_ps.uint64_val = rate_per_second(nfills, uptime); + col_nflushes.uint64_val = nflushes; + col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime); + col_nslabs.uint64_val = nslabs; + col_nreslabs.uint64_val = nreslabs; + col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime); + + /* + * Note that mutex columns were initialized above, if mutex == + * true. + */ + + emitter_table_row(emitter, &row); + } + emitter_json_array_end(emitter); /* Close "bins". */ + + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + +static void +stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) { + unsigned nbins, nlextents, j; + bool in_gap, in_gap_prev; + + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlextents", &nlextents, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + emitter_row_t row; + emitter_row_init(&row); + + COL_HDR(row, size, NULL, right, 20, size) + COL_HDR(row, ind, NULL, right, 4, unsigned) + COL_HDR(row, allocated, NULL, right, 13, size) + COL_HDR(row, nmalloc, NULL, right, 13, uint64) + COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, ndalloc, NULL, right, 13, uint64) + COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, nrequests, NULL, right, 13, uint64) + COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64) + COL_HDR(row, curlextents, NULL, right, 13, size) + + /* As with bins, we label the large extents table. 
*/ + header_size.width -= 6; + emitter_table_printf(emitter, "large:"); + emitter_table_row(emitter, &header_row); + emitter_json_array_kv_begin(emitter, "lextents"); + + for (j = 0, in_gap = false; j < nlextents; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t lextent_size, curlextents; + + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, + &curlextents, size_t); + + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "curlextents", emitter_type_size, + &curlextents); + emitter_json_object_end(emitter); + + col_size.size_val = lextent_size; + col_ind.unsigned_val = nbins + j; + col_allocated.size_val = curlextents * lextent_size; + col_nmalloc.uint64_val = nmalloc; + col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime); + col_ndalloc.uint64_val = ndalloc; + col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime); + col_nrequests.uint64_val = nrequests; + col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime); + col_curlextents.size_val = curlextents; + + if (!in_gap) { + emitter_table_row(emitter, &row); + } + } + emitter_json_array_end(emitter); /* Close "lextents". */ + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + +static void +stats_arena_extents_print(emitter_t *emitter, unsigned i) { + unsigned j; + bool in_gap, in_gap_prev; + emitter_row_t header_row; + emitter_row_init(&header_row); + emitter_row_t row; + emitter_row_init(&row); + + COL_HDR(row, size, NULL, right, 20, size) + COL_HDR(row, ind, NULL, right, 4, unsigned) + COL_HDR(row, ndirty, NULL, right, 13, size) + COL_HDR(row, dirty, NULL, right, 13, size) + COL_HDR(row, nmuzzy, NULL, right, 13, size) + COL_HDR(row, muzzy, NULL, right, 13, size) + COL_HDR(row, nretained, NULL, right, 13, size) + COL_HDR(row, retained, NULL, right, 13, size) + COL_HDR(row, ntotal, NULL, right, 13, size) + COL_HDR(row, total, NULL, right, 13, size) + + /* Label this section. 
*/ + header_size.width -= 8; + emitter_table_printf(emitter, "extents:"); + emitter_table_row(emitter, &header_row); + emitter_json_array_kv_begin(emitter, "extents"); + + in_gap = false; + for (j = 0; j < SC_NPSIZES; j++) { + size_t ndirty, nmuzzy, nretained, total, dirty_bytes, + muzzy_bytes, retained_bytes, total_bytes; + CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j, + &ndirty, size_t); + CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j, + &nmuzzy, size_t); + CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j, + &nretained, size_t); + CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j, + &dirty_bytes, size_t); + CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j, + &muzzy_bytes, size_t); + CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j, + &retained_bytes, size_t); + total = ndirty + nmuzzy + nretained; + total_bytes = dirty_bytes + muzzy_bytes + retained_bytes; + + in_gap_prev = in_gap; + in_gap = (total == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty); + emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy); + emitter_json_kv(emitter, "nretained", emitter_type_size, + &nretained); + + emitter_json_kv(emitter, "dirty_bytes", emitter_type_size, + &dirty_bytes); + emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size, + &muzzy_bytes); + emitter_json_kv(emitter, "retained_bytes", emitter_type_size, + &retained_bytes); + emitter_json_object_end(emitter); + + col_size.size_val = sz_pind2sz(j); + col_ind.size_val = j; + col_ndirty.size_val = ndirty; + col_dirty.size_val = dirty_bytes; + col_nmuzzy.size_val = nmuzzy; + col_muzzy.size_val = muzzy_bytes; + col_nretained.size_val = nretained; + col_retained.size_val = retained_bytes; + col_ntotal.size_val = total; + col_total.size_val = total_bytes; + + if (!in_gap) { + emitter_table_row(emitter, &row); + } + } + emitter_json_array_end(emitter); /* Close "extents". */ + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + +static void +stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) { + emitter_row_t row; + emitter_col_t col_name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &col_name, col64, col32); + + emitter_json_object_kv_begin(emitter, "mutexes"); + emitter_table_row(emitter, &row); + + for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; + i++) { + const char *name = arena_mutex_names[i]; + emitter_json_object_kv_begin(emitter, name); + mutex_stats_read_arena(arena_ind, i, name, &col_name, col64, + col32, uptime); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_object_end(emitter); /* Close the mutex dict. */ + } + emitter_json_object_end(emitter); /* End "mutexes". 
*/ +} + +static void +stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, + bool mutex, bool extents) { + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident, metadata_thp, extent_avail; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills, + small_nflushes; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills, + large_nflushes; + size_t tcache_bytes, abandoned_vm; + uint64_t uptime; + + CTL_GET("arenas.page", &page, size_t); + + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + emitter_kv(emitter, "nthreads", "assigned threads", + emitter_type_unsigned, &nthreads); + + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); + emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, + &uptime); + + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + emitter_kv(emitter, "dss", "dss allocation precedence", + emitter_type_string, &dss); + + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); + CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); + + emitter_row_t decay_row; + emitter_row_init(&decay_row); + + /* JSON-style emission. */ + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, + &dirty_decay_ms); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, + &muzzy_decay_ms); + + emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive); + emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty); + emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy); + + emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64, + &dirty_npurge); + emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64, + &dirty_nmadvise); + emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64, + &dirty_purged); + + emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64, + &muzzy_npurge); + emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64, + &muzzy_nmadvise); + emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64, + &muzzy_purged); + + /* Table-style emission. 
*/ + COL(decay_row, decay_type, right, 9, title); + col_decay_type.str_val = "decaying:"; + + COL(decay_row, decay_time, right, 6, title); + col_decay_time.str_val = "time"; + + COL(decay_row, decay_npages, right, 13, title); + col_decay_npages.str_val = "npages"; + + COL(decay_row, decay_sweeps, right, 13, title); + col_decay_sweeps.str_val = "sweeps"; + + COL(decay_row, decay_madvises, right, 13, title); + col_decay_madvises.str_val = "madvises"; + + COL(decay_row, decay_purged, right, 13, title); + col_decay_purged.str_val = "purged"; + + /* Title row. */ + emitter_table_row(emitter, &decay_row); + + /* Dirty row. */ + col_decay_type.str_val = "dirty:"; + + if (dirty_decay_ms >= 0) { + col_decay_time.type = emitter_type_ssize; + col_decay_time.ssize_val = dirty_decay_ms; + } else { + col_decay_time.type = emitter_type_title; + col_decay_time.str_val = "N/A"; + } + + col_decay_npages.type = emitter_type_size; + col_decay_npages.size_val = pdirty; + + col_decay_sweeps.type = emitter_type_uint64; + col_decay_sweeps.uint64_val = dirty_npurge; + + col_decay_madvises.type = emitter_type_uint64; + col_decay_madvises.uint64_val = dirty_nmadvise; + + col_decay_purged.type = emitter_type_uint64; + col_decay_purged.uint64_val = dirty_purged; + + emitter_table_row(emitter, &decay_row); + + /* Muzzy row. */ + col_decay_type.str_val = "muzzy:"; + + if (muzzy_decay_ms >= 0) { + col_decay_time.type = emitter_type_ssize; + col_decay_time.ssize_val = muzzy_decay_ms; + } else { + col_decay_time.type = emitter_type_title; + col_decay_time.str_val = "N/A"; + } + + col_decay_npages.type = emitter_type_size; + col_decay_npages.size_val = pmuzzy; + + col_decay_sweeps.type = emitter_type_uint64; + col_decay_sweeps.uint64_val = muzzy_npurge; + + col_decay_madvises.type = emitter_type_uint64; + col_decay_madvises.uint64_val = muzzy_nmadvise; + + col_decay_purged.type = emitter_type_uint64; + col_decay_purged.uint64_val = muzzy_purged; + + emitter_table_row(emitter, &decay_row); + + /* Small / large / total allocation counts. 
*/ + emitter_row_t alloc_count_row; + emitter_row_init(&alloc_count_row); + + COL(alloc_count_row, count_title, left, 21, title); + col_count_title.str_val = ""; + + COL(alloc_count_row, count_allocated, right, 16, title); + col_count_allocated.str_val = "allocated"; + + COL(alloc_count_row, count_nmalloc, right, 16, title); + col_count_nmalloc.str_val = "nmalloc"; + COL(alloc_count_row, count_nmalloc_ps, right, 8, title); + col_count_nmalloc_ps.str_val = "(#/sec)"; + + COL(alloc_count_row, count_ndalloc, right, 16, title); + col_count_ndalloc.str_val = "ndalloc"; + COL(alloc_count_row, count_ndalloc_ps, right, 8, title); + col_count_ndalloc_ps.str_val = "(#/sec)"; + + COL(alloc_count_row, count_nrequests, right, 16, title); + col_count_nrequests.str_val = "nrequests"; + COL(alloc_count_row, count_nrequests_ps, right, 10, title); + col_count_nrequests_ps.str_val = "(#/sec)"; + + COL(alloc_count_row, count_nfills, right, 16, title); + col_count_nfills.str_val = "nfill"; + COL(alloc_count_row, count_nfills_ps, right, 10, title); + col_count_nfills_ps.str_val = "(#/sec)"; + + COL(alloc_count_row, count_nflushes, right, 16, title); + col_count_nflushes.str_val = "nflush"; + COL(alloc_count_row, count_nflushes_ps, right, 10, title); + col_count_nflushes_ps.str_val = "(#/sec)"; + + emitter_table_row(emitter, &alloc_count_row); + + col_count_nmalloc_ps.type = emitter_type_uint64; + col_count_ndalloc_ps.type = emitter_type_uint64; + col_count_nrequests_ps.type = emitter_type_uint64; + col_count_nfills_ps.type = emitter_type_uint64; + col_count_nflushes_ps.type = emitter_type_uint64; + +#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ + CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ + &small_or_large##_##name, valtype##_t); \ + emitter_json_kv(emitter, #name, emitter_type_##valtype, \ + &small_or_large##_##name); \ + col_count_##name.type = emitter_type_##valtype; \ + col_count_##name.valtype##_val = small_or_large##_##name; + + emitter_json_object_kv_begin(emitter, "small"); + col_count_title.str_val = "small:"; + + GET_AND_EMIT_ALLOC_STAT(small, allocated, size) + GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) + col_count_nmalloc_ps.uint64_val = + rate_per_second(col_count_nmalloc.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64) + col_count_ndalloc_ps.uint64_val = + rate_per_second(col_count_ndalloc.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) + col_count_nrequests_ps.uint64_val = + rate_per_second(col_count_nrequests.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64) + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64) + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_object_end(emitter); /* Close "small". 
*/ + + emitter_json_object_kv_begin(emitter, "large"); + col_count_title.str_val = "large:"; + + GET_AND_EMIT_ALLOC_STAT(large, allocated, size) + GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) + col_count_nmalloc_ps.uint64_val = + rate_per_second(col_count_nmalloc.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) + col_count_ndalloc_ps.uint64_val = + rate_per_second(col_count_ndalloc.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) + col_count_nrequests_ps.uint64_val = + rate_per_second(col_count_nrequests.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64) + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64) + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_object_end(emitter); /* Close "large". */ + +#undef GET_AND_EMIT_ALLOC_STAT + + /* Aggregated small + large stats are emitter only in table mode. */ + col_count_title.str_val = "total:"; + col_count_allocated.size_val = small_allocated + large_allocated; + col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; + col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; + col_count_nrequests.uint64_val = small_nrequests + large_nrequests; + col_count_nfills.uint64_val = small_nfills + large_nfills; + col_count_nflushes.uint64_val = small_nflushes + large_nflushes; + col_count_nmalloc_ps.uint64_val = + rate_per_second(col_count_nmalloc.uint64_val, uptime); + col_count_ndalloc_ps.uint64_val = + rate_per_second(col_count_ndalloc.uint64_val, uptime); + col_count_nrequests_ps.uint64_val = + rate_per_second(col_count_nrequests.uint64_val, uptime); + col_count_nfills_ps.uint64_val = + rate_per_second(col_count_nfills.uint64_val, uptime); + col_count_nflushes_ps.uint64_val = + rate_per_second(col_count_nflushes.uint64_val, uptime); + emitter_table_row(emitter, &alloc_count_row); + + emitter_row_t mem_count_row; + emitter_row_init(&mem_count_row); + + emitter_col_t mem_count_title; + emitter_col_init(&mem_count_title, &mem_count_row); + mem_count_title.justify = emitter_justify_left; + mem_count_title.width = 21; + mem_count_title.type = emitter_type_title; + mem_count_title.str_val = ""; + + emitter_col_t mem_count_val; + emitter_col_init(&mem_count_val, &mem_count_row); + mem_count_val.justify = emitter_justify_right; + mem_count_val.width = 16; + mem_count_val.type = emitter_type_title; + mem_count_val.str_val = ""; + + emitter_table_row(emitter, &mem_count_row); + mem_count_val.type = emitter_type_size; + + /* Active count in bytes is emitted only in table mode. 
*/ + mem_count_title.str_val = "active:"; + mem_count_val.size_val = pactive * page; + emitter_table_row(emitter, &mem_count_row); + +#define GET_AND_EMIT_MEM_STAT(stat) \ + CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ + emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ + mem_count_title.str_val = #stat":"; \ + mem_count_val.size_val = stat; \ + emitter_table_row(emitter, &mem_count_row); + + GET_AND_EMIT_MEM_STAT(mapped) + GET_AND_EMIT_MEM_STAT(retained) + GET_AND_EMIT_MEM_STAT(base) + GET_AND_EMIT_MEM_STAT(internal) + GET_AND_EMIT_MEM_STAT(metadata_thp) + GET_AND_EMIT_MEM_STAT(tcache_bytes) + GET_AND_EMIT_MEM_STAT(resident) + GET_AND_EMIT_MEM_STAT(abandoned_vm) + GET_AND_EMIT_MEM_STAT(extent_avail) +#undef GET_AND_EMIT_MEM_STAT + + if (mutex) { + stats_arena_mutexes_print(emitter, i, uptime); + } + if (bins) { + stats_arena_bins_print(emitter, mutex, i, uptime); + } + if (large) { + stats_arena_lextents_print(emitter, i, uptime); + } + if (extents) { + stats_arena_extents_print(emitter, i); + } +} + +static void +stats_general_print(emitter_t *emitter) { + const char *cpv; + bool bv, bv2; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv, ssv2; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); + + /* config. */ + emitter_dict_begin(emitter, "config", "Build-time option settings"); +#define CONFIG_WRITE_BOOL(name) \ + do { \ + CTL_GET("config."#name, &bv, bool); \ + emitter_kv(emitter, #name, "config."#name, \ + emitter_type_bool, &bv); \ + } while (0) + + CONFIG_WRITE_BOOL(cache_oblivious); + CONFIG_WRITE_BOOL(debug); + CONFIG_WRITE_BOOL(fill); + CONFIG_WRITE_BOOL(lazy_lock); + emitter_kv(emitter, "malloc_conf", "config.malloc_conf", + emitter_type_string, &config_malloc_conf); + + CONFIG_WRITE_BOOL(opt_safety_checks); + CONFIG_WRITE_BOOL(prof); + CONFIG_WRITE_BOOL(prof_libgcc); + CONFIG_WRITE_BOOL(prof_libunwind); + CONFIG_WRITE_BOOL(stats); + CONFIG_WRITE_BOOL(utrace); + CONFIG_WRITE_BOOL(xmalloc); +#undef CONFIG_WRITE_BOOL + emitter_dict_end(emitter); /* Close "config" dict. */ + + /* opt. 
*/ +#define OPT_WRITE(name, var, size, emitter_type) \ + if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ + 0) { \ + emitter_kv(emitter, name, "opt."name, emitter_type, \ + &var); \ + } + +#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ + altname) \ + if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ + 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ + == 0) { \ + emitter_kv_note(emitter, name, "opt."name, \ + emitter_type, &var1, altname, emitter_type, \ + &var2); \ + } + +#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) +#define OPT_WRITE_BOOL_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) + +#define OPT_WRITE_UNSIGNED(name) \ + OPT_WRITE(name, uv, usz, emitter_type_unsigned) + +#define OPT_WRITE_SIZE_T(name) \ + OPT_WRITE(name, sv, ssz, emitter_type_size) +#define OPT_WRITE_SSIZE_T(name) \ + OPT_WRITE(name, ssv, sssz, emitter_type_ssize) +#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ + altname) + +#define OPT_WRITE_CHAR_P(name) \ + OPT_WRITE(name, cpv, cpsz, emitter_type_string) + + emitter_dict_begin(emitter, "opt", "Run-time option settings"); + + OPT_WRITE_BOOL("abort") + OPT_WRITE_BOOL("abort_conf") + OPT_WRITE_BOOL("confirm_conf") + OPT_WRITE_BOOL("retain") + OPT_WRITE_CHAR_P("dss") + OPT_WRITE_UNSIGNED("narenas") + OPT_WRITE_CHAR_P("percpu_arena") + OPT_WRITE_SIZE_T("oversize_threshold") + OPT_WRITE_CHAR_P("metadata_thp") + OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") + OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") + OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") + OPT_WRITE_SIZE_T("lg_extent_max_active_fit") + OPT_WRITE_CHAR_P("junk") + OPT_WRITE_BOOL("zero") + OPT_WRITE_BOOL("utrace") + OPT_WRITE_BOOL("xmalloc") + OPT_WRITE_BOOL("tcache") + OPT_WRITE_SSIZE_T("lg_tcache_max") + OPT_WRITE_CHAR_P("thp") + OPT_WRITE_BOOL("prof") + OPT_WRITE_CHAR_P("prof_prefix") + OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") + OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", + "prof.thread_active_init") + OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") + OPT_WRITE_BOOL("prof_accum") + OPT_WRITE_SSIZE_T("lg_prof_interval") + OPT_WRITE_BOOL("prof_gdump") + OPT_WRITE_BOOL("prof_final") + OPT_WRITE_BOOL("prof_leak") + OPT_WRITE_BOOL("stats_print") + OPT_WRITE_CHAR_P("stats_print_opts") + + emitter_dict_end(emitter); + +#undef OPT_WRITE +#undef OPT_WRITE_MUTABLE +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_UNSIGNED +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_SSIZE_T_MUTABLE +#undef OPT_WRITE_CHAR_P + + /* prof. */ + if (config_prof) { + emitter_dict_begin(emitter, "prof", "Profiling settings"); + + CTL_GET("prof.thread_active_init", &bv, bool); + emitter_kv(emitter, "thread_active_init", + "prof.thread_active_init", emitter_type_bool, &bv); + + CTL_GET("prof.active", &bv, bool); + emitter_kv(emitter, "active", "prof.active", emitter_type_bool, + &bv); + + CTL_GET("prof.gdump", &bv, bool); + emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, + &bv); + + CTL_GET("prof.interval", &u64v, uint64_t); + emitter_kv(emitter, "interval", "prof.interval", + emitter_type_uint64, &u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + emitter_kv(emitter, "lg_sample", "prof.lg_sample", + emitter_type_ssize, &ssv); + + emitter_dict_end(emitter); /* Close "prof". */ + } + + /* arenas. 
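Each entry emitted under "opt." above has a matching run-time knob that the application can set. A minimal, hedged example of the usual mechanism (the option names shown, such as narenas and dirty_decay_ms, are common ones; with a prefixed build the symbol is je_malloc_conf, and the MALLOC_CONF environment variable is an alternative):

/* Application-provided option string, read by jemalloc at startup; these
 * values then show up in the "opt." section of the statistics output. */
const char *malloc_conf = "narenas:2,dirty_decay_ms:5000,stats_print:true";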
*/ + /* + * The json output sticks arena info into an "arenas" dict; the table + * output puts them at the top-level. + */ + emitter_json_object_kv_begin(emitter, "arenas"); + + CTL_GET("arenas.narenas", &uv, unsigned); + emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv); + + /* + * Decay settings are emitted only in json mode; in table mode, they're + * emitted as notes with the opt output, above. + */ + CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.quantum", &sv, size_t); + emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv); + + CTL_GET("arenas.page", &sv, size_t); + emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv); + + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + emitter_kv(emitter, "tcache_max", + "Maximum thread-cached size class", emitter_type_size, &sv); + } + + unsigned nbins; + CTL_GET("arenas.nbins", &nbins, unsigned); + emitter_kv(emitter, "nbins", "Number of bin size classes", + emitter_type_unsigned, &nbins); + + unsigned nhbins; + CTL_GET("arenas.nhbins", &nhbins, unsigned); + emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes", + emitter_type_unsigned, &nhbins); + + /* + * We do enough mallctls in a loop that we actually want to omit them + * (not just omit the printing). + */ + if (emitter->output == emitter_output_json) { + emitter_json_array_kv_begin(emitter, "bin"); + for (unsigned i = 0; i < nbins; i++) { + emitter_json_object_begin(emitter); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + emitter_json_kv(emitter, "nregs", emitter_type_uint32, + &u32v); + + CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t); + emitter_json_kv(emitter, "slab_size", emitter_type_size, + &sv); + + CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t); + emitter_json_kv(emitter, "nshards", emitter_type_uint32, + &u32v); + + emitter_json_object_end(emitter); + } + emitter_json_array_end(emitter); /* Close "bin". */ + } + + unsigned nlextents; + CTL_GET("arenas.nlextents", &nlextents, unsigned); + emitter_kv(emitter, "nlextents", "Number of large size classes", + emitter_type_unsigned, &nlextents); + + if (emitter->output == emitter_output_json) { + emitter_json_array_kv_begin(emitter, "lextent"); + for (unsigned i = 0; i < nlextents; i++) { + emitter_json_object_begin(emitter); + + CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + emitter_json_object_end(emitter); + } + emitter_json_array_end(emitter); /* Close "lextent". */ + } + + emitter_json_object_end(emitter); /* Close "arenas" */ +} + +static void +stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, + bool unmerged, bool bins, bool large, bool mutex, bool extents) { + /* + * These should be deleted. We keep them around for a while, to aid in + * the transition to the emitter code. 
+ */ + size_t allocated, active, metadata, metadata_thp, resident, mapped, + retained; + size_t num_background_threads; + uint64_t background_thread_num_runs, background_thread_run_interval; + + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.metadata_thp", &metadata_thp, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + + if (have_background_thread) { + CTL_GET("stats.background_thread.num_threads", + &num_background_threads, size_t); + CTL_GET("stats.background_thread.num_runs", + &background_thread_num_runs, uint64_t); + CTL_GET("stats.background_thread.run_interval", + &background_thread_run_interval, uint64_t); + } else { + num_background_threads = 0; + background_thread_num_runs = 0; + background_thread_run_interval = 0; + } + + /* Generic global stats. */ + emitter_json_object_kv_begin(emitter, "stats"); + emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); + emitter_json_kv(emitter, "active", emitter_type_size, &active); + emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata); + emitter_json_kv(emitter, "metadata_thp", emitter_type_size, + &metadata_thp); + emitter_json_kv(emitter, "resident", emitter_type_size, &resident); + emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); + emitter_json_kv(emitter, "retained", emitter_type_size, &retained); + + emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " + "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " + "retained: %zu\n", allocated, active, metadata, metadata_thp, + resident, mapped, retained); + + /* Background thread stats. */ + emitter_json_object_kv_begin(emitter, "background_thread"); + emitter_json_kv(emitter, "num_threads", emitter_type_size, + &num_background_threads); + emitter_json_kv(emitter, "num_runs", emitter_type_uint64, + &background_thread_num_runs); + emitter_json_kv(emitter, "run_interval", emitter_type_uint64, + &background_thread_run_interval); + emitter_json_object_end(emitter); /* Close "background_thread". */ + + emitter_table_printf(emitter, "Background threads: %zu, " + "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", + num_background_threads, background_thread_num_runs, + background_thread_run_interval); + + if (mutex) { + emitter_row_t row; + emitter_col_t name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + uint64_t uptime; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &name, col64, col32); + + emitter_table_row(emitter, &row); + emitter_json_object_kv_begin(emitter, "mutexes"); + + CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t); + + for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_read_global(global_mutex_names[i], &name, + col64, col32, uptime); + emitter_json_object_kv_begin(emitter, global_mutex_names[i]); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_object_end(emitter); + } + + emitter_json_object_end(emitter); /* Close "mutexes". */ + } + + emitter_json_object_end(emitter); /* Close "stats". 
*/ + + if (merged || destroyed || unmerged) { + unsigned narenas; + + emitter_json_object_kv_begin(emitter, "stats.arenas"); + + CTL_GET("arenas.narenas", &narenas, unsigned); + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz; + VARIABLE_ARRAY(bool, initialized, narenas); + bool destroyed_initialized; + unsigned i, j, ninitialized; + + xmallctlnametomib("arena.0.initialized", mib, &miblen); + for (i = ninitialized = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &initialized[i], &sz, + NULL, 0); + if (initialized[i]) { + ninitialized++; + } + } + mib[1] = MALLCTL_ARENAS_DESTROYED; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, + NULL, 0); + + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + emitter_table_printf(emitter, "Merged arenas stats:\n"); + emitter_json_object_kv_begin(emitter, "merged"); + stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins, + large, mutex, extents); + emitter_json_object_end(emitter); /* Close "merged". */ + } + + /* Destroyed stats. */ + if (destroyed_initialized && destroyed) { + /* Print destroyed arena stats. */ + emitter_table_printf(emitter, + "Destroyed arenas stats:\n"); + emitter_json_object_kv_begin(emitter, "destroyed"); + stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, + bins, large, mutex, extents); + emitter_json_object_end(emitter); /* Close "destroyed". */ + } + + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + char arena_ind_str[20]; + malloc_snprintf(arena_ind_str, + sizeof(arena_ind_str), "%u", i); + emitter_json_object_kv_begin(emitter, + arena_ind_str); + emitter_table_printf(emitter, + "arenas[%s]:\n", arena_ind_str); + stats_arena_print(emitter, i, bins, + large, mutex, extents); + /* Close "". */ + emitter_json_object_end(emitter); + } + } + } + emitter_json_object_end(emitter); /* Close "stats.arenas". */ + } +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + int err; + uint64_t epoch; + size_t u64sz; +#define OPTION(o, v, d, s) bool v = d; + STATS_PRINT_OPTIONS +#undef OPTION + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. + * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write(": Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write(": Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (opts != NULL) { + for (unsigned i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { +#define OPTION(o, v, d, s) case o: v = s; break; + STATS_PRINT_OPTIONS +#undef OPTION + default:; + } + } + } + + emitter_t emitter; + emitter_init(&emitter, + json ? 
emitter_output_json : emitter_output_table, write_cb, + cbopaque); + emitter_begin(&emitter); + emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); + emitter_json_object_kv_begin(&emitter, "jemalloc"); + + if (general) { + stats_general_print(&emitter); + } + if (config_stats) { + stats_print_helper(&emitter, merged, destroyed, unmerged, + bins, large, mutex, extents); + } + + emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */ + emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); + emitter_end(&emitter); +} diff --git a/deps/jemalloc/src/sz.c b/deps/jemalloc/src/sz.c new file mode 100644 index 0000000..8633fb0 --- /dev/null +++ b/deps/jemalloc/src/sz.c @@ -0,0 +1,64 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALIGNED(CACHELINE) +size_t sz_pind2sz_tab[SC_NPSIZES+1]; + +static void +sz_boot_pind2sz_tab(const sc_data_t *sc_data) { + int pind = 0; + for (unsigned i = 0; i < SC_NSIZES; i++) { + const sc_t *sc = &sc_data->sc[i]; + if (sc->psz) { + sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base) + + (ZU(sc->ndelta) << sc->lg_delta); + pind++; + } + } + for (int i = pind; i <= (int)SC_NPSIZES; i++) { + sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE; + } +} + +JEMALLOC_ALIGNED(CACHELINE) +size_t sz_index2size_tab[SC_NSIZES]; + +static void +sz_boot_index2size_tab(const sc_data_t *sc_data) { + for (unsigned i = 0; i < SC_NSIZES; i++) { + const sc_t *sc = &sc_data->sc[i]; + sz_index2size_tab[i] = (ZU(1) << sc->lg_base) + + (ZU(sc->ndelta) << (sc->lg_delta)); + } +} + +/* + * To keep this table small, we divide sizes by the tiny min size, which gives + * the smallest interval for which the result can change. + */ +JEMALLOC_ALIGNED(CACHELINE) +uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1]; + +static void +sz_boot_size2index_tab(const sc_data_t *sc_data) { + size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1; + size_t dst_ind = 0; + for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max; + sc_ind++) { + const sc_t *sc = &sc_data->sc[sc_ind]; + size_t sz = (ZU(1) << sc->lg_base) + + (ZU(sc->ndelta) << sc->lg_delta); + size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1) + >> SC_LG_TINY_MIN); + for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) { + sz_size2index_tab[dst_ind] = sc_ind; + } + } +} + +void +sz_boot(const sc_data_t *sc_data) { + sz_boot_pind2sz_tab(sc_data); + sz_boot_index2size_tab(sc_data); + sz_boot_size2index_tab(sc_data); +} diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c new file mode 100644 index 0000000..50099a9 --- /dev/null +++ b/deps/jemalloc/src/tcache.c @@ -0,0 +1,798 @@ +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/safety_check.h" +#include "jemalloc/internal/sc.h" + +/******************************************************************************/ +/* Data. */ + +bool opt_tcache = true; +ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; + +cache_bin_info_t *tcache_bin_info; +static unsigned stack_nelms; /* Total stack elms per tcache. */ + +unsigned nhbins; +size_t tcache_maxclass; + +tcaches_t *tcaches; + +/* Index of first element within tcaches that has never been used. */ +static unsigned tcaches_past; + +/* Head of singly linked list tracking available tcaches elements. 
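The stats_print() routine above is what backs jemalloc's public malloc_stats_print(). A minimal caller, assuming a default unprefixed build (with a prefix the entry point is je_malloc_stats_print):

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = malloc(1000);
	/* A NULL write callback sends the report to the default output;
	 * "J" selects the JSON emitter, and the other option letters follow
	 * STATS_PRINT_OPTIONS as parsed above. */
	malloc_stats_print(NULL, NULL, "J");
	free(p);
	return 0;
}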
*/ +static tcaches_t *tcaches_avail; + +/* Protects tcaches{,_past,_avail}. */ +static malloc_mutex_t tcaches_mtx; + +/******************************************************************************/ + +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { + return arena_salloc(tsdn, ptr); +} + +void +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { + szind_t binind = tcache->next_gc_bin; + + cache_bin_t *tbin; + if (binind < SC_NBINS) { + tbin = tcache_small_bin_get(tcache, binind); + } else { + tbin = tcache_large_bin_get(tcache, binind); + } + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low water mark. + */ + if (binind < SC_NBINS) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + tbin->ncached - tbin->low_water + (tbin->low_water + >> 2)); + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + cache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + if ((tbin_info->ncached_max >> + (tcache->lg_fill_div[binind] + 1)) >= 1) { + tcache->lg_fill_div[binind]++; + } + } else { + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached + - tbin->low_water + (tbin->low_water >> 2), tcache); + } + } else if (tbin->low_water < 0) { + /* + * Increase fill count by 2X for small bins. Make sure + * lg_fill_div stays greater than 0. + */ + if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) { + tcache->lg_fill_div[binind]--; + } + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) { + tcache->next_gc_bin = 0; + } +} + +void * +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, bool *tcache_success) { + void *ret; + + assert(tcache->arena != NULL); + arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, + config_prof ? tcache->prof_accumbytes : 0); + if (config_prof) { + tcache->prof_accumbytes = 0; + } + ret = cache_bin_alloc_easy(tbin, tcache_success); + + return ret; +} + +/* Enabled with --enable-extra-size-check. */ +static void +tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind, + size_t nflush, extent_t **extents){ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + /* + * Verify that the items in the tcache all have the correct size; this + * is useful for catching sized deallocation bugs, also to fail early + * instead of corrupting metadata. Since this can be turned on for opt + * builds, avoid the branch in the loop. + */ + szind_t szind; + size_t sz_sum = binind * nflush; + for (unsigned i = 0 ; i < nflush; i++) { + rtree_extent_szind_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true, + &extents[i], &szind); + sz_sum -= szind; + } + if (sz_sum != 0) { + safety_check_fail(": size mismatch in thread cache " + "detected, likely caused by sized deallocation bugs by " + "application. Abort.\n"); + abort(); + } +} + +void +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem) { + bool merged_stats = false; + + assert(binind < SC_NBINS); + assert((cache_bin_sz_t)rem <= tbin->ncached); + + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + + /* Look up extent once per item. 
*/ + if (config_opt_safety_checks) { + tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, + nflush, item_extent); + } else { + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), + *(tbin->avail - 1 - i)); + } + } + while (nflush > 0) { + /* Lock the arena bin associated with the first object. */ + extent_t *extent = item_extent[0]; + unsigned bin_arena_ind = extent_arena_ind_get(extent); + arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind, + false); + unsigned binshard = extent_binshard_get(extent); + assert(binshard < bin_infos[binind].n_shards); + bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } + tcache->prof_accumbytes = 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (config_stats && bin_arena == arena && !merged_stats) { + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_ind_get(extent) == bin_arena_ind + && extent_binshard_get(extent) == binshard) { + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, bin, binind, extent, ptr); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; + ndeferred++; + } + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + nflush = ndeferred; + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + unsigned binshard; + bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind, + &binshard); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if (tbin->ncached < tbin->low_water) { + tbin->low_water = tbin->ncached; + } +} + +void +tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) { + bool merged_stats = false; + + assert(binind < nhbins); + assert((cache_bin_sz_t)rem <= tbin->ncached); + + arena_t *tcache_arena = tcache->arena; + assert(tcache_arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + +#ifndef JEMALLOC_EXTRA_SIZE_CHECK + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } +#else + tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush, + item_extent); +#endif + while (nflush > 0) { + /* Lock the arena associated with the first object. 
*/ + extent_t *extent = item_extent[0]; + unsigned locked_arena_ind = extent_arena_ind_get(extent); + arena_t *locked_arena = arena_get(tsd_tsdn(tsd), + locked_arena_ind, false); + bool idump; + + if (config_prof) { + idump = false; + } + + bool lock_large = !arena_is_auto(locked_arena); + if (lock_large) { + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + } + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + extent = item_extent[i]; + if (extent_arena_ind_get(extent) == locked_arena_ind) { + large_dalloc_prep_junked_locked(tsd_tsdn(tsd), + extent); + } + } + if ((config_prof || config_stats) && + (locked_arena == tcache_arena)) { + if (config_prof) { + idump = arena_prof_accum(tsd_tsdn(tsd), + tcache_arena, tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; + arena_stats_large_flush_nrequests_add( + tsd_tsdn(tsd), &tcache_arena->stats, binind, + tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } + } + if (lock_large) { + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + } + + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_ind_get(extent) == locked_arena_ind) { + large_dalloc_finish(tsd_tsdn(tsd), extent); + } else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; + ndeferred++; + } + } + if (config_prof && idump) { + prof_idump(tsd_tsdn(tsd)); + } + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + nflush = ndeferred; + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd), + &tcache_arena->stats, binind, tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if (tbin->ncached < tbin->low_water) { + tbin->low_water = tbin->ncached; + } +} + +void +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + assert(tcache->arena == NULL); + tcache->arena = arena; + + if (config_stats) { + /* Link into list of extant tcaches. */ + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, tcache->bins_small, + tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + } +} + +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { + arena_t *arena = tcache->arena; + assert(arena != NULL); + if (config_stats) { + /* Unlink from list of extant tcaches. 
*/ + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + ql_remove(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + } + tcache->arena = NULL; +} + +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache); + tcache_arena_associate(tsdn, tcache, arena); +} + +bool +tsd_tcache_enabled_data_init(tsd_t *tsd) { + /* Called upon tsd initialization. */ + tsd_tcache_enabled_set(tsd, opt_tcache); + tsd_slow_update(tsd); + + if (opt_tcache) { + /* Trigger tcache init. */ + tsd_tcache_data_init(tsd); + } + + return false; +} + +/* Initialize auto tcache (embedded in TSD). */ +static void +tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { + memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); + tcache->prof_accumbytes = 0; + tcache->next_gc_bin = 0; + tcache->arena = NULL; + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + size_t stack_offset = 0; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS); + memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS)); + unsigned i = 0; + for (; i < SC_NBINS; i++) { + tcache->lg_fill_div[i] = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). + */ + tcache_small_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + for (; i < nhbins; i++) { + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache_large_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + assert(stack_offset == stack_nelms * sizeof(void *)); +} + +/* Initialize auto tcache (embedded in TSD). */ +bool +tsd_tcache_data_init(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + size_t size = stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, + NULL, true, arena_get(TSDN_NULL, 0, true)); + if (avail_array == NULL) { + return true; + } + + tcache_init(tsd, tcache, avail_array); + /* + * Initialization is a bit tricky here. After malloc init is done, all + * threads can rely on arena_choose and associate tcache accordingly. + * However, the thread that does actual malloc bootstrapping relies on + * functional tsd, and it can only rely on a0. In that case, we + * associate its tcache to a0 temporarily, and later on + * arena_choose_hard() will re-associate properly. + */ + tcache->arena = NULL; + arena_t *arena; + if (!malloc_initialized()) { + /* If in initialization, assign to a0. */ + arena = arena_get(tsd_tsdn(tsd), 0, false); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } else { + arena = arena_choose(tsd, NULL); + /* This may happen if thread.tcache.enabled is used. 
*/ + if (tcache->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } + } + assert(arena == tcache->arena); + + return false; +} + +/* Created manual tcache for tcache.create mallctl. */ +tcache_t * +tcache_create_explicit(tsd_t *tsd) { + tcache_t *tcache; + size_t size, stack_offset; + + size = sizeof(tcache_t); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) { + return NULL; + } + + tcache_init(tsd, tcache, + (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); + + return tcache; +} + +static void +tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { + assert(tcache->arena != NULL); + + for (unsigned i = 0; i < SC_NBINS; i++) { + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); + + if (config_stats) { + assert(tbin->tstats.nrequests == 0); + } + } + for (unsigned i = SC_NBINS; i < nhbins; i++) { + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); + + if (config_stats) { + assert(tbin->tstats.nrequests == 0); + } + } + + if (config_prof && tcache->prof_accumbytes > 0 && + arena_prof_accum(tsd_tsdn(tsd), tcache->arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } +} + +void +tcache_flush(tsd_t *tsd) { + assert(tcache_available(tsd)); + tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); +} + +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_flush_cache(tsd, tcache); + arena_t *arena = tcache->arena; + tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + + if (tsd_tcache) { + /* Release the avail array for the TSD embedded auto tcache. */ + void *avail_array = + (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - + (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); + idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); + } else { + /* Release both the tcache struct and avail array. */ + idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); + } + + /* + * The deallocation and tcache flush above may not trigger decay since + * we are on the tcache shutdown path (potentially with non-nominal + * tsd). Manually trigger decay to avoid pathological cases. Also + * include arena 0 because the tcache array is allocated from it. + */ + arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false), + false, false); + + if (arena_nthreads_get(arena, false) == 0 && + !background_thread_enabled()) { + /* Force purging when no threads assigned to the arena anymore. */ + arena_decay(tsd_tsdn(tsd), arena, false, true); + } else { + arena_decay(tsd_tsdn(tsd), arena, false, false); + } +} + +/* For auto tcache (embedded in TSD) only. 
*/ +void +tcache_cleanup(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get(tsd); + if (!tcache_available(tsd)) { + assert(tsd_tcache_enabled_get(tsd) == false); + if (config_debug) { + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + } + return; + } + assert(tsd_tcache_enabled_get(tsd)); + assert(tcache_small_bin_get(tcache, 0)->avail != NULL); + + tcache_destroy(tsd, tcache, true); + if (config_debug) { + tcache_small_bin_get(tcache, 0)->avail = NULL; + } +} + +void +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + unsigned i; + + cassert(config_stats); + + /* Merge and reset tcache stats. */ + for (i = 0; i < SC_NBINS; i++) { + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); + unsigned binshard; + bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(tsdn, &bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i, + tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } +} + +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches == NULL) { + tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) + * (MALLOCX_TCACHE_MAX+1), CACHELINE); + if (tcaches == NULL) { + err = true; + goto label_return; + } + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + +bool +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + + bool err; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + tcache_t *tcache = tcache_create_explicit(tsd); + if (tcache == NULL) { + err = true; + goto label_return; + } + + tcaches_t *elm; + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = (unsigned)(elm - tcaches); + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + + err = false; +label_return: + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + return err; +} + +static tcache_t * +tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); + + if (elm->tcache == NULL) { + return NULL; + } + tcache_t *tcache = elm->tcache; + if (allow_reinit) { + elm->tcache = TCACHES_ELM_NEED_REINIT; + } else { + elm->tcache = NULL; + } + + if (tcache == TCACHES_ELM_NEED_REINIT) { + return NULL; + } + return tcache; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + /* Destroy the tcache; recreate in tcaches_get() if needed. 
*/ + tcache_destroy(tsd, tcache, false); + } +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcaches_t *elm = &tcaches[ind]; + tcache_t *tcache = tcaches_elm_remove(tsd, elm, false); + elm->next = tcaches_avail; + tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +bool +tcache_boot(tsdn_t *tsdn) { + /* If necessary, clamp opt_lg_tcache_max. */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SC_SMALL_MAXCLASS) { + tcache_maxclass = SC_SMALL_MAXCLASS; + } else { + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } + + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, + malloc_mutex_rank_exclusive)) { + return true; + } + + nhbins = sz_size2index(tcache_maxclass) + 1; + + /* Initialize tcache_bin_info. */ + tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins + * sizeof(cache_bin_info_t), CACHELINE); + if (tcache_bin_info == NULL) { + return true; + } + stack_nelms = 0; + unsigned i; + for (i = 0; i < SC_NBINS; i++) { + if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((bin_infos[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (bin_infos[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + return false; +} + +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } +} diff --git a/deps/jemalloc/src/test_hooks.c b/deps/jemalloc/src/test_hooks.c new file mode 100644 index 0000000..ace00d9 --- /dev/null +++ b/deps/jemalloc/src/test_hooks.c @@ -0,0 +1,12 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +/* + * The hooks are a little bit screwy -- they're not genuinely exported in the + * sense that we want them available to end-users, but we do want them visible + * from outside the generated library, so that we can use them in test code. + */ +JEMALLOC_EXPORT +void (*test_hooks_arena_new_hook)() = NULL; + +JEMALLOC_EXPORT +void (*test_hooks_libc_hook)() = NULL; diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c new file mode 100644 index 0000000..d7b8cd2 --- /dev/null +++ b/deps/jemalloc/src/ticker.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c new file mode 100644 index 0000000..a31f6b9 --- /dev/null +++ b/deps/jemalloc/src/tsd.c @@ -0,0 +1,534 @@ +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" + +/******************************************************************************/ +/* Data. 
*/ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + +/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS + +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER; +JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false; +bool tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER; +pthread_key_t tsd_tsd; +bool tsd_booted = false; +#elif (defined(_WIN32)) +DWORD tsd_tsd; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; +#else + +/* + * This contains a mutex, but it's pretty convenient to allow the mutex code to + * have a dependency on tsd. So we define the struct here, and only refer to it + * by pointer in the header. + */ +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; + +pthread_key_t tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), + MALLOC_MUTEX_INITIALIZER +}; + +tsd_wrapper_t tsd_boot_wrapper = { + false, + TSD_INITIALIZER +}; +bool tsd_booted = false; +#endif + +JEMALLOC_DIAGNOSTIC_POP + +/******************************************************************************/ + +/* A list of all the tsds in the nominal state. */ +typedef ql_head(tsd_t) tsd_list_t; +static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds); +static malloc_mutex_t tsd_nominal_tsds_lock; + +/* How many slow-path-enabling features are turned on. */ +static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0); + +static bool +tsd_in_nominal_list(tsd_t *tsd) { + tsd_t *tsd_list; + bool found = false; + /* + * We don't know that tsd is nominal; it might not be safe to get data + * out of it here. + */ + malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock); + ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) { + if (tsd == tsd_list) { + found = true; + break; + } + } + malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock); + return found; +} + +static void +tsd_add_nominal(tsd_t *tsd) { + assert(!tsd_in_nominal_list(tsd)); + assert(tsd_state_get(tsd) <= tsd_state_nominal_max); + ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link); + malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); + ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link); + malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); +} + +static void +tsd_remove_nominal(tsd_t *tsd) { + assert(tsd_in_nominal_list(tsd)); + assert(tsd_state_get(tsd) <= tsd_state_nominal_max); + malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); + ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link); + malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); +} + +static void +tsd_force_recompute(tsdn_t *tsdn) { + /* + * The stores to tsd->state here need to synchronize with the exchange + * in tsd_slow_update. 
+ */ + atomic_fence(ATOMIC_RELEASE); + malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock); + tsd_t *remote_tsd; + ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) { + assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED) + <= tsd_state_nominal_max); + tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute, + ATOMIC_RELAXED); + } + malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock); +} + +void +tsd_global_slow_inc(tsdn_t *tsdn) { + atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED); + /* + * We unconditionally force a recompute, even if the global slow count + * was already positive. If we didn't, then it would be possible for us + * to return to the user, have the user synchronize externally with some + * other thread, and then have that other thread not have picked up the + * update yet (since the original incrementing thread might still be + * making its way through the tsd list). + */ + tsd_force_recompute(tsdn); +} + +void tsd_global_slow_dec(tsdn_t *tsdn) { + atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED); + /* See the note in ..._inc(). */ + tsd_force_recompute(tsdn); +} + +static bool +tsd_local_slow(tsd_t *tsd) { + return !tsd_tcache_enabled_get(tsd) + || tsd_reentrancy_level_get(tsd) > 0; +} + +bool +tsd_global_slow() { + return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0; +} + +/******************************************************************************/ + +static uint8_t +tsd_state_compute(tsd_t *tsd) { + if (!tsd_nominal(tsd)) { + return tsd_state_get(tsd); + } + /* We're in *a* nominal state; but which one? */ + if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) { + return tsd_state_nominal_slow; + } else { + return tsd_state_nominal; + } +} + +void +tsd_slow_update(tsd_t *tsd) { + uint8_t old_state; + do { + uint8_t new_state = tsd_state_compute(tsd); + old_state = tsd_atomic_exchange(&tsd->state, new_state, + ATOMIC_ACQUIRE); + } while (old_state == tsd_state_nominal_recompute); +} + +void +tsd_state_set(tsd_t *tsd, uint8_t new_state) { + /* Only the tsd module can change the state *to* recompute. */ + assert(new_state != tsd_state_nominal_recompute); + uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED); + if (old_state > tsd_state_nominal_max) { + /* + * Not currently in the nominal list, but it might need to be + * inserted there. + */ + assert(!tsd_in_nominal_list(tsd)); + tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED); + if (new_state <= tsd_state_nominal_max) { + tsd_add_nominal(tsd); + } + } else { + /* + * We're currently nominal. If the new state is non-nominal, + * great; we take ourselves off the list and just enter the new + * state. + */ + assert(tsd_in_nominal_list(tsd)); + if (new_state > tsd_state_nominal_max) { + tsd_remove_nominal(tsd); + tsd_atomic_store(&tsd->state, new_state, + ATOMIC_RELAXED); + } else { + /* + * This is the tricky case. We're transitioning from + * one nominal state to another. The caller can't know + * about any races that are occuring at the same time, + * so we always have to recompute no matter what. + */ + tsd_slow_update(tsd); + } + } +} + +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. 
+ */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + + /* + * A nondeterministic seed based on the address of tsd reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + *tsd_offset_statep_get(tsd) = config_debug ? 0 : + (uint64_t)(uintptr_t)tsd; + + return tsd_tcache_enabled_data_init(tsd); +} + +static void +assert_tsd_data_cleanup_done(tsd_t *tsd) { + assert(!tsd_nominal(tsd)); + assert(!tsd_in_nominal_list(tsd)); + assert(*tsd_arenap_get_unsafe(tsd) == NULL); + assert(*tsd_iarenap_get_unsafe(tsd) == NULL); + assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); + assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); + assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); + assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); +} + +static bool +tsd_data_init_nocleanup(tsd_t *tsd) { + assert(tsd_state_get(tsd) == tsd_state_reincarnated || + tsd_state_get(tsd) == tsd_state_minimal_initialized); + /* + * During reincarnation, there is no guarantee that the cleanup function + * will be called (deallocation may happen after all tsd destructors). + * We set up tsd in a way that no cleanup is needed. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + *tsd_tcache_enabledp_get_unsafe(tsd) = false; + *tsd_reentrancy_levelp_get(tsd) = 1; + assert_tsd_data_cleanup_done(tsd); + + return false; +} + +tsd_t * +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); + + if (tsd_state_get(tsd) == tsd_state_nominal_slow) { + /* + * On slow path but no work needed. Note that we can't + * necessarily *assert* that we're slow, because we might be + * slow because of an asynchronous modification to global state, + * which might be asynchronously modified *back*. + */ + } else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) { + tsd_slow_update(tsd); + } else if (tsd_state_get(tsd) == tsd_state_uninitialized) { + if (!minimal) { + if (tsd_booted) { + tsd_state_set(tsd, tsd_state_nominal); + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } + } else { + tsd_state_set(tsd, tsd_state_minimal_initialized); + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. 
*/ + tsd_state_set(tsd, tsd_state_nominal); + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } + } else if (tsd_state_get(tsd) == tsd_state_purgatory) { + tsd_state_set(tsd, tsd_state_reincarnated); + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } else { + assert(tsd_state_get(tsd) == tsd_state_reincarnated); + } + + return tsd; +} + +void * +malloc_tsd_malloc(size_t size) { + return a0malloc(CACHELINE_CEILING(size)); +} + +void +malloc_tsd_dalloc(void *wrapper) { + a0dalloc(wrapper); +} + +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif +void +_malloc_thread_cleanup(void) { + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + unsigned i; + + for (i = 0; i < ncleanups; i++) { + pending[i] = true; + } + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i](); + if (pending[i]) { + again = true; + } + } + } + } while (again); +} +#endif + +void +malloc_tsd_cleanup_register(bool (*f)(void)) { + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups] = f; + ncleanups++; +} + +static void +tsd_do_data_cleanup(tsd_t *tsd) { + prof_tdata_cleanup(tsd); + iarena_cleanup(tsd); + arena_cleanup(tsd); + arenas_tdata_cleanup(tsd); + tcache_cleanup(tsd); + witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); +} + +void +tsd_cleanup(void *arg) { + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd_state_get(tsd)) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ + case tsd_state_reincarnated: + /* + * Reincarnated means another destructor deallocated memory + * after the destructor was called. Cleanup isn't required but + * is still called for testing and completeness. + */ + assert_tsd_data_cleanup_done(tsd); + /* Fall through. */ + case tsd_state_nominal: + case tsd_state_nominal_slow: + tsd_do_data_cleanup(tsd); + tsd_state_set(tsd, tsd_state_purgatory); + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. + */ + break; + default: + not_reached(); + } +#ifdef JEMALLOC_JET + test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); + if (test_callback != NULL) { + test_callback(data); + } +#endif +} + +tsd_t * +malloc_tsd_boot0(void) { + tsd_t *tsd; + + ncleanups = 0; + if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock", + WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) { + return NULL; + } + if (tsd_boot0()) { + return NULL; + } + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return tsd; +} + +void +malloc_tsd_boot1(void) { + tsd_boot1(); + tsd_t *tsd = tsd_fetch(); + /* malloc_slow has been set properly. Update tsd_slow. 
*/ + tsd_slow_update(tsd); + *tsd_arenas_tdata_bypassp_get(tsd) = false; +} + +#ifdef _WIN32 +static BOOL WINAPI +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } + return true; +} + +/* + * We need to be able to say "read" here (in the "pragma section"), but have + * hooked "read". We won't read for the rest of the file, so we can get away + * with unhooking. + */ +#ifdef read +# undef read +#endif + +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) ) +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return iter->data; + } + } + /* Insert block into list. */ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return NULL; +} + +void +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_remove(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); +} +#endif + +void +tsd_prefork(tsd_t *tsd) { + malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); +} + +void +tsd_postfork_parent(tsd_t *tsd) { + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); +} + +void +tsd_postfork_child(tsd_t *tsd) { + malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock); + ql_new(&tsd_nominal_tsds); + + if (tsd_state_get(tsd) <= tsd_state_nominal_max) { + tsd_add_nominal(tsd); + } +} diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c new file mode 100644 index 0000000..f42b72a --- /dev/null +++ b/deps/jemalloc/src/witness.c @@ -0,0 +1,100 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque) { + witness->name = name; + witness->rank = rank; + witness->comp = comp; + witness->opaque = opaque; +} + +static void +witness_lock_error_impl(const witness_list_t *witnesses, + const witness_t *witness) { + witness_t *w; + + malloc_printf(": Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; + +static void 
+witness_owner_error_impl(const witness_t *witness) { + malloc_printf(": Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_owner_error_t *JET_MUTABLE witness_owner_error = + witness_owner_error_impl; + +static void +witness_not_owner_error_impl(const witness_t *witness) { + malloc_printf(": Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = + witness_not_owner_error_impl; + +static void +witness_depth_error_impl(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + witness_t *w; + + malloc_printf(": Should own %u lock%s of rank >= %u:", depth, + (depth != 1) ? "s" : "", rank_inclusive); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +witness_depth_error_t *JET_MUTABLE witness_depth_error = + witness_depth_error_impl; + +void +witnesses_cleanup(witness_tsd_t *witness_tsd) { + witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); + + /* Do nothing. */ +} + +void +witness_prefork(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = true; +} + +void +witness_postfork_parent(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = false; +} + +void +witness_postfork_child(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = &witness_tsd->witnesses; + ql_new(witnesses); +#endif + witness_tsd->forking = false; +} diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c new file mode 100644 index 0000000..23dfdd0 --- /dev/null +++ b/deps/jemalloc/src/zone.c @@ -0,0 +1,469 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. 
+ */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void *enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t *malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + +/* + * The malloc_default_purgeable_zone() function is only available on >= 10.6. + * We need to check whether it is present at runtime, thus the weak_import. + */ +extern malloc_zone_t *malloc_default_purgeable_zone(void) +JEMALLOC_ATTR(weak_import); + +/******************************************************************************/ +/* Data. */ + +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; +static pid_t zone_force_lock_pid = -1; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static size_t zone_size(malloc_zone_t *zone, const void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t +zone_size(malloc_zone_t *zone, const void *ptr) { + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they + * reside within a mapped extent before determining size. + */ + return ivsalloc(tsdn_fetch(), ptr); +} + +static void * +zone_malloc(malloc_zone_t *zone, size_t size) { + return je_malloc(size); +} + +static void * +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { + return je_calloc(num, size); +} + +static void * +zone_valloc(malloc_zone_t *zone, size_t size) { + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, PAGE, size); + + return ret; +} + +static void +zone_free(malloc_zone_t *zone, void *ptr) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + je_free(ptr); + return; + } + + free(ptr); +} + +static void * +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + return je_realloc(ptr, size); + } + + return realloc(ptr, size); +} + +static void * +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { + void *ret = NULL; /* Assignment avoids useless compiler warning. 
*/ + + je_posix_memalign(&ret, alignment, size); + + return ret; +} + +static void +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; + + alloc_size = ivsalloc(tsdn_fetch(), ptr); + if (alloc_size != 0) { + assert(alloc_size == size); + je_free(ptr); + return; + } + + free(ptr); +} + +static void +zone_destroy(malloc_zone_t *zone) { + /* This function should never be called. */ + not_reached(); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) { + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) { + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } +} + +static size_t +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { + return 0; +} + +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { + size = 1; + } + return sz_s2u(size); +} + +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) { + return KERN_SUCCESS; +} + +static boolean_t +zone_check(malloc_zone_t *zone) { + return true; +} + +static void +zone_print(malloc_zone_t *zone, boolean_t verbose) { +} + +static void +zone_log(malloc_zone_t *zone, void *address) { +} + +static void +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { + /* + * See the note in zone_force_unlock, below, to see why we need + * this. + */ + assert(zone_force_lock_pid == -1); + zone_force_lock_pid = getpid(); + jemalloc_prefork(); + } +} + +static void +zone_force_unlock(malloc_zone_t *zone) { + /* + * zone_force_lock and zone_force_unlock are the entry points to the + * forking machinery on OS X. The tricky thing is, the child is not + * allowed to unlock mutexes locked in the parent, even if owned by the + * forking thread (and the mutex type we use in OS X will fail an assert + * if we try). In the child, we can get away with reinitializing all + * the mutexes, which has the effect of unlocking them. In the parent, + * doing this would mean we wouldn't wake any waiters blocked on the + * mutexes we unlock. So, we record the pid of the current thread in + * zone_force_lock, and use that to detect if we're in the parent or + * child here, to decide which unlock logic we need. + */ + if (isthreaded) { + assert(zone_force_lock_pid != -1); + if (getpid() == zone_force_lock_pid) { + jemalloc_postfork_parent(); + } else { + jemalloc_postfork_child(); + } + zone_force_lock_pid = -1; + } +} + +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t +zone_locked(malloc_zone_t *zone) { + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) { + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. 
*/ + zone_force_unlock(zone); +} + +static void +zone_init(void) { + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +#else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) { + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; + + /* + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. + */ + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } + + if (num_zones) { + return zones[0]; + } + + return malloc_default_zone(); +} + +/* As written, this function can only promote jemalloc_zone. */ +static void +zone_promote(void) { + malloc_zone_t *zone; + + do { + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. + * On OSX < 10.6, unregistering shifts all registered zones. + * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); + + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. 
Registering it again then puts + * it at the end, obviously after the default zone. + */ + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); +} + +JEMALLOC_ATTR(constructor) +void +zone_register(void) { + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) { + return; + } + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); +} diff --git a/deps/jemalloc/test/include/test/SFMT-alti.h b/deps/jemalloc/test/include/test/SFMT-alti.h new file mode 100644 index 0000000..a1885db --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-alti.h @@ -0,0 +1,186 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +/** + * @file SFMT-alti.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * pseudorandom number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software. + * see LICENSE.txt + */ + +#ifndef SFMT_ALTI_H +#define SFMT_ALTI_H + +/** + * This function represents the recursion formula in AltiVec and BIG ENDIAN. + * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @return output + */ +JEMALLOC_ALWAYS_INLINE +vector unsigned int vec_recursion(vector unsigned int a, + vector unsigned int b, + vector unsigned int c, + vector unsigned int d) { + + const vector unsigned int sl1 = ALTI_SL1; + const vector unsigned int sr1 = ALTI_SR1; +#ifdef ONLY64 + const vector unsigned int mask = ALTI_MSK64; + const vector unsigned char perm_sl = ALTI_SL2_PERM64; + const vector unsigned char perm_sr = ALTI_SR2_PERM64; +#else + const vector unsigned int mask = ALTI_MSK; + const vector unsigned char perm_sl = ALTI_SL2_PERM; + const vector unsigned char perm_sr = ALTI_SR2_PERM; +#endif + vector unsigned int v, w, x, y, z; + x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); + v = a; + y = vec_sr(b, sr1); + z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); + w = vec_sl(d, sl1); + z = vec_xor(z, w); + y = vec_and(y, mask); + v = vec_xor(v, x); + z = vec_xor(z, y); + z = vec_xor(z, v); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +static inline void gen_rand_all(sfmt_t *ctx) { + int i; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. 
+ */ +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j].s = array[j + size - N].s; + } + for (; i < size; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + ctx->sfmt[j++].s = r; + r1 = r2; + r2 = r; + } +} + +#ifndef ONLY64 +#if defined(__APPLE__) +#define ALTI_SWAP (vector unsigned char) \ + (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) +#else +#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} +#endif +/** + * This function swaps high and low 32-bit of 64-bit integers in user + * specified array. + * + * @param array an 128-bit array to be swaped. + * @param size size of 128-bit array. + */ +static inline void swap(w128_t *array, int size) { + int i; + const vector unsigned char perm = ALTI_SWAP; + + for (i = 0; i < size; i++) { + array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); + } +} +#endif + +#endif diff --git a/deps/jemalloc/test/include/test/SFMT-params.h b/deps/jemalloc/test/include/test/SFMT-params.h new file mode 100644 index 0000000..ade6622 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params.h @@ -0,0 +1,132 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS_H +#define SFMT_PARAMS_H + +#if !defined(MEXP) +#ifdef __GNUC__ + #warning "MEXP is not defined. I assume MEXP is 19937." +#endif + #define MEXP 19937 +#endif +/*----------------- + BASIC DEFINITIONS + -----------------*/ +/** Mersenne Exponent. The period of the sequence + * is a multiple of 2^MEXP-1. + * #define MEXP 19937 */ +/** SFMT generator has an internal state array of 128-bit integers, + * and N is its size. */ +#define N (MEXP / 128 + 1) +/** N32 is the size of internal state array when regarded as an array + * of 32-bit integers.*/ +#define N32 (N * 4) +/** N64 is the size of internal state array when regarded as an array + * of 64-bit integers.*/ +#define N64 (N * 2) + +/*---------------------- + the parameters of SFMT + following definitions are in paramsXXXX.h file. + ----------------------*/ +/** the pick up position of the array. +#define POS1 122 +*/ + +/** the parameter of shift left as four 32-bit registers. +#define SL1 18 + */ + +/** the parameter of shift left as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SL2 1 +*/ + +/** the parameter of shift right as four 32-bit registers. +#define SR1 11 +*/ + +/** the parameter of shift right as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SR2 1 +*/ + +/** A bitmask, used in the recursion. These parameters are introduced + * to break symmetry of SIMD. +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +*/ + +/** These definitions are part of a 128-bit period certification vector. +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xc98e126aU +*/ + +#if MEXP == 607 + #include "test/SFMT-params607.h" +#elif MEXP == 1279 + #include "test/SFMT-params1279.h" +#elif MEXP == 2281 + #include "test/SFMT-params2281.h" +#elif MEXP == 4253 + #include "test/SFMT-params4253.h" +#elif MEXP == 11213 + #include "test/SFMT-params11213.h" +#elif MEXP == 19937 + #include "test/SFMT-params19937.h" +#elif MEXP == 44497 + #include "test/SFMT-params44497.h" +#elif MEXP == 86243 + #include "test/SFMT-params86243.h" +#elif MEXP == 132049 + #include "test/SFMT-params132049.h" +#elif MEXP == 216091 + #include "test/SFMT-params216091.h" +#else +#ifdef __GNUC__ + #error "MEXP is not valid." + #undef MEXP +#else + #undef MEXP +#endif + +#endif + +#endif /* SFMT_PARAMS_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params11213.h b/deps/jemalloc/test/include/test/SFMT-params11213.h new file mode 100644 index 0000000..2994bd2 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params11213.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS11213_H +#define SFMT_PARAMS11213_H + +#define POS1 68 +#define SL1 14 +#define SL2 3 +#define SR1 7 +#define SR2 3 +#define MSK1 0xeffff7fbU +#define MSK2 0xffffffefU +#define MSK3 0xdfdfbfffU +#define MSK4 0x7fffdbfdU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xe8148000U +#define PARITY4 0xd0c7afa3U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" + +#endif /* SFMT_PARAMS11213_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params1279.h b/deps/jemalloc/test/include/test/SFMT-params1279.h new file mode 100644 index 0000000..d7959f9 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params1279.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS1279_H +#define SFMT_PARAMS1279_H + +#define POS1 7 +#define SL1 14 +#define SL2 3 +#define SR1 5 +#define SR2 1 +#define MSK1 0xf7fefffdU +#define MSK2 0x7fefcfffU +#define MSK3 0xaff3ef3fU +#define MSK4 0xb5ffff7fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x20000000U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" + +#endif /* SFMT_PARAMS1279_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params132049.h b/deps/jemalloc/test/include/test/SFMT-params132049.h new file mode 100644 index 0000000..a1dcec3 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params132049.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS132049_H +#define SFMT_PARAMS132049_H + +#define POS1 110 +#define SL1 19 +#define SL2 1 +#define SR1 21 +#define SR2 1 +#define MSK1 0xffffbb5fU +#define MSK2 0xfb6ebf95U +#define MSK3 0xfffefffaU +#define MSK4 0xcff77fffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xcb520000U +#define PARITY4 0xc7e91c7dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" + +#endif /* SFMT_PARAMS132049_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params19937.h b/deps/jemalloc/test/include/test/SFMT-params19937.h new file mode 100644 index 0000000..fb92b4c --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params19937.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS19937_H +#define SFMT_PARAMS19937_H + +#define POS1 122 +#define SL1 18 +#define SL2 1 +#define SR1 11 +#define SR2 1 +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x13c9e684U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" + +#endif /* SFMT_PARAMS19937_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params216091.h b/deps/jemalloc/test/include/test/SFMT-params216091.h new file mode 100644 index 0000000..125ce28 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params216091.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS216091_H +#define SFMT_PARAMS216091_H + +#define POS1 627 +#define SL1 11 +#define SL2 3 +#define SR1 10 +#define SR2 1 +#define MSK1 0xbff7bff7U +#define MSK2 0xbfffffffU +#define MSK3 0xbffffa7fU +#define MSK4 0xffddfbfbU +#define PARITY1 0xf8000001U +#define PARITY2 0x89e80709U +#define PARITY3 0x3bd2b64bU +#define PARITY4 0x0c64b1e4U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" + +#endif /* SFMT_PARAMS216091_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params2281.h b/deps/jemalloc/test/include/test/SFMT-params2281.h new file mode 100644 index 0000000..0ef85c4 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params2281.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS2281_H +#define SFMT_PARAMS2281_H + +#define POS1 12 +#define SL1 19 +#define SL2 1 +#define SR1 5 +#define SR2 1 +#define MSK1 0xbff7ffbfU +#define MSK2 0xfdfffffeU +#define MSK3 0xf7ffef7fU +#define MSK4 0xf2f7cbbfU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x41dfa600U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" + +#endif /* SFMT_PARAMS2281_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params4253.h b/deps/jemalloc/test/include/test/SFMT-params4253.h new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params4253.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS4253_H +#define SFMT_PARAMS4253_H + +#define POS1 17 +#define SL1 20 +#define SL2 1 +#define SR1 7 +#define SR2 1 +#define MSK1 0x9f7bffffU +#define MSK2 0x9fffff5fU +#define MSK3 0x3efffffbU +#define MSK4 0xfffff7bbU +#define PARITY1 0xa8000001U +#define PARITY2 0xaf5390a3U +#define PARITY3 0xb740b3f8U +#define PARITY4 0x6c11486dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" + +#endif /* SFMT_PARAMS4253_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params44497.h b/deps/jemalloc/test/include/test/SFMT-params44497.h new file mode 100644 index 0000000..85598fe --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params44497.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS44497_H +#define SFMT_PARAMS44497_H + +#define POS1 330 +#define SL1 5 +#define SL2 3 +#define SR1 9 +#define SR2 3 +#define MSK1 0xeffffffbU +#define MSK2 0xdfbebfffU +#define MSK3 0xbfbf7befU +#define MSK4 0x9ffd7bffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xa3ac4000U +#define PARITY4 0xecc1327aU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" + +#endif /* SFMT_PARAMS44497_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params607.h b/deps/jemalloc/test/include/test/SFMT-params607.h new file mode 100644 index 0000000..bc76485 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params607.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS607_H +#define SFMT_PARAMS607_H + +#define POS1 2 +#define SL1 15 +#define SL2 3 +#define SR1 13 +#define SR2 3 +#define MSK1 0xfdff37ffU +#define MSK2 0xef7f3f7dU +#define MSK3 0xff777b7dU +#define MSK4 0x7ff7fb2fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x5986f054U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" + +#endif /* SFMT_PARAMS607_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-params86243.h b/deps/jemalloc/test/include/test/SFMT-params86243.h new file mode 100644 index 0000000..5e4d783 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-params86243.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS86243_H +#define SFMT_PARAMS86243_H + +#define POS1 366 +#define SL1 6 +#define SL2 7 +#define SR1 19 +#define SR2 1 +#define MSK1 0xfdbffbffU +#define MSK2 0xbff7ff3fU +#define MSK3 0xfd77efffU +#define MSK4 0xbf9ff3ffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xe9528d85U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} + #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" + +#endif /* SFMT_PARAMS86243_H */ diff --git a/deps/jemalloc/test/include/test/SFMT-sse2.h b/deps/jemalloc/test/include/test/SFMT-sse2.h new file mode 100644 index 0000000..169ad55 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT-sse2.h @@ -0,0 +1,157 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT-sse2.h + * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * @note We assume LITTLE ENDIAN in this file + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software, see LICENSE.txt + */ + +#ifndef SFMT_SSE2_H +#define SFMT_SSE2_H + +/** + * This function represents the recursion formula. + * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @param mask 128-bit mask + * @return output + */ +JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, + __m128i c, __m128i d, __m128i mask) { + __m128i v, x, y, z; + + x = _mm_load_si128(a); + y = _mm_srli_epi32(*b, SR1); + z = _mm_srli_si128(c, SR2); + v = _mm_slli_epi32(d, SL1); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, v); + x = _mm_slli_si128(x, SL2); + y = _mm_and_si128(y, mask); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, y); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +static inline void gen_rand_all(sfmt_t *ctx) { + int i; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. 
+ */ +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + r = _mm_load_si128(&array[j + size - N].si); + _mm_store_si128(&ctx->sfmt[j].si, r); + } + for (; i < size; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + _mm_store_si128(&ctx->sfmt[j++].si, r); + r1 = r2; + r2 = r; + } +} + +#endif diff --git a/deps/jemalloc/test/include/test/SFMT.h b/deps/jemalloc/test/include/test/SFMT.h new file mode 100644 index 0000000..863fc55 --- /dev/null +++ b/deps/jemalloc/test/include/test/SFMT.h @@ -0,0 +1,146 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom + * number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software. + * see LICENSE.txt + * + * @note We assume that your system has inttypes.h. If your system + * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, + * and you have to define PRIu64 and PRIx64 in this file as follows: + * @verbatim + typedef unsigned int uint32_t + typedef unsigned long long uint64_t + #define PRIu64 "llu" + #define PRIx64 "llx" +@endverbatim + * uint32_t must be exactly 32-bit unsigned integer type (no more, no + * less), and uint64_t must be exactly 64-bit unsigned integer type. + * PRIu64 and PRIx64 are used for printf function to print 64-bit + * unsigned int and 64-bit unsigned int in hexadecimal format. + */ + +#ifndef SFMT_H +#define SFMT_H + +typedef struct sfmt_s sfmt_t; + +uint32_t gen_rand32(sfmt_t *ctx); +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); +uint64_t gen_rand64(sfmt_t *ctx); +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); +void fill_array32(sfmt_t *ctx, uint32_t *array, int size); +void fill_array64(sfmt_t *ctx, uint64_t *array, int size); +sfmt_t *init_gen_rand(uint32_t seed); +sfmt_t *init_by_array(uint32_t *init_key, int key_length); +void fini_gen_rand(sfmt_t *ctx); +const char *get_idstring(void); +int get_min_array_size32(void); +int get_min_array_size64(void); + +/* These real versions are due to Isaku Wada */ +/** generates a random number on [0,1]-real-interval */ +static inline double to_real1(uint32_t v) { + return v * (1.0/4294967295.0); + /* divided by 2^32-1 */ +} + +/** generates a random number on [0,1]-real-interval */ +static inline double genrand_real1(sfmt_t *ctx) { + return to_real1(gen_rand32(ctx)); +} + +/** generates a random number on [0,1)-real-interval */ +static inline double to_real2(uint32_t v) { + return v * (1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on [0,1)-real-interval */ +static inline double genrand_real2(sfmt_t *ctx) { + return to_real2(gen_rand32(ctx)); +} + +/** generates a random number on (0,1)-real-interval */ +static inline double to_real3(uint32_t v) { + return (((double)v) + 0.5)*(1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on (0,1)-real-interval */ +static inline double genrand_real3(sfmt_t *ctx) { + return to_real3(gen_rand32(ctx)); +} +/** These real versions are due to Isaku Wada */ + +/** generates a random number on [0,1) with 53-bit resolution*/ +static inline double to_res53(uint64_t v) { + return v * (1.0/18446744073709551616.0L); +} + +/** generates a random number on [0,1) with 53-bit resolution from two + * 32 bit integers */ +static inline double to_res53_mix(uint32_t x, uint32_t y) { + return to_res53(x | ((uint64_t)y << 32)); +} + +/** generates a random number on [0,1) with 53-bit resolution + */ +static inline double genrand_res53(sfmt_t *ctx) { + return to_res53(gen_rand64(ctx)); +} + +/** generates a random number on [0,1) with 53-bit resolution + using 32bit integer. + */ +static inline double genrand_res53_mix(sfmt_t *ctx) { + uint32_t x, y; + + x = gen_rand32(ctx); + y = gen_rand32(ctx); + return to_res53_mix(x, y); +} +#endif diff --git a/deps/jemalloc/test/include/test/btalloc.h b/deps/jemalloc/test/include/test/btalloc.h new file mode 100644 index 0000000..5877ea7 --- /dev/null +++ b/deps/jemalloc/test/include/test/btalloc.h @@ -0,0 +1,30 @@ +/* btalloc() provides a mechanism for allocating via permuted backtraces. 
*/ +void *btalloc(size_t size, unsigned bits); + +#define btalloc_n_proto(n) \ +void *btalloc_##n(size_t size, unsigned bits); +btalloc_n_proto(0) +btalloc_n_proto(1) + +#define btalloc_n_gen(n) \ +void * \ +btalloc_##n(size_t size, unsigned bits) { \ + void *p; \ + \ + if (bits == 0) { \ + p = mallocx(size, 0); \ + } else { \ + switch (bits & 0x1U) { \ + case 0: \ + p = (btalloc_0(size, bits >> 1)); \ + break; \ + case 1: \ + p = (btalloc_1(size, bits >> 1)); \ + break; \ + default: not_reached(); \ + } \ + } \ + /* Intentionally sabotage tail call optimization. */ \ + assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ + return p; \ +} diff --git a/deps/jemalloc/test/include/test/extent_hooks.h b/deps/jemalloc/test/include/test/extent_hooks.h new file mode 100644 index 0000000..1f06201 --- /dev/null +++ b/deps/jemalloc/test/include/test/extent_hooks.h @@ -0,0 +1,289 @@ +/* + * Boilerplate code used for testing extent hooks via interception and + * passthrough. + */ + +static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); + +static extent_hooks_t *default_hooks; +static extent_hooks_t hooks = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +/* Control whether hook functions pass calls through to default hooks. */ +static bool try_alloc = true; +static bool try_dalloc = true; +static bool try_destroy = true; +static bool try_commit = true; +static bool try_decommit = true; +static bool try_purge_lazy = true; +static bool try_purge_forced = true; +static bool try_split = true; +static bool try_merge = true; + +/* Set to false prior to operations, then introspect after operations. */ +static bool called_alloc; +static bool called_dalloc; +static bool called_destroy; +static bool called_commit; +static bool called_decommit; +static bool called_purge_lazy; +static bool called_purge_forced; +static bool called_split; +static bool called_merge; + +/* Set to false prior to operations, then introspect after operations. 
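+ *
+ * A hedged illustration of the intended idiom (editorial sketch, not part of
+ * the upstream header); test_extent_body() in test/integration/extent.c below
+ * follows the same pattern, shown here for the alloc hook on a freshly created
+ * arena (arena_ind and large_size are placeholders):
+ *
+ *   called_alloc = false;    // reset introspection flags before the operation
+ *   did_alloc = false;
+ *   try_alloc = true;        // allow passthrough to the default alloc hook
+ *   p = mallocx(large_size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+ *   assert_ptr_not_null(p, "Unexpected mallocx() error");
+ *   assert_true(called_alloc, "Expected alloc hook call");
+ *   assert_true(did_alloc, "Default alloc hook should have succeeded");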
*/ +static bool did_alloc; +static bool did_dalloc; +static bool did_destroy; +static bool did_commit; +static bool did_decommit; +static bool did_purge_lazy; +static bool did_purge_forced; +static bool did_split; +static bool did_merge; + +#if 0 +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +#else +# define TRACE_HOOK(fmt, ...) +#endif + +static void * +extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + void *ret; + + TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " + "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, + new_addr, size, alignment, *zero ? "true" : "false", *commit ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook, + "Wrong hook function"); + called_alloc = true; + if (!try_alloc) { + return NULL; + } + ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, + zero, commit, 0); + did_alloc = (ret != NULL); + return ret; +} + +static bool +extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + err = default_hooks->dalloc(default_hooks, addr, size, committed, 0); + did_dalloc = !err; + return err; +} + +static void +extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? 
+ "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook, + "Wrong hook function"); + called_destroy = true; + if (!try_destroy) { + return; + } + default_hooks->destroy(default_hooks, addr, size, committed, 0); + did_destroy = true; +} + +static bool +extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->commit, extent_commit_hook, + "Wrong hook function"); + called_commit = true; + if (!try_commit) { + return true; + } + err = default_hooks->commit(default_hooks, addr, size, offset, length, + 0); + did_commit = !err; + return err; +} + +static bool +extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook, + "Wrong hook function"); + called_decommit = true; + if (!try_decommit) { + return true; + } + err = default_hooks->decommit(default_hooks, addr, size, offset, length, + 0); + did_decommit = !err; + return err; +} + +static bool +extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, + "Wrong hook function"); + called_purge_lazy = true; + if (!try_purge_lazy) { + return true; + } + err = default_hooks->purge_lazy == NULL || + default_hooks->purge_lazy(default_hooks, addr, size, offset, length, + 0); + did_purge_lazy = !err; + return err; +} + +static bool +extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, + "Wrong hook function"); + called_purge_forced = true; + if (!try_purge_forced) { + return true; + } + err = default_hooks->purge_forced == NULL || + default_hooks->purge_forced(default_hooks, addr, size, offset, + length, 0); + did_purge_forced = !err; + return err; +} + +static bool +extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " + "size_b=%zu, 
committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr, size, size_a, size_b, committed ? "true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->split, extent_split_hook, + "Wrong hook function"); + called_split = true; + if (!try_split) { + return true; + } + err = (default_hooks->split == NULL || + default_hooks->split(default_hooks, addr, size, size_a, size_b, + committed, 0)); + did_split = !err; + return err; +} + +static bool +extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " + "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr_a, size_a, addr_b, size_b, committed ? "true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->merge, extent_merge_hook, + "Wrong hook function"); + assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b, + "Extents not mergeable"); + called_merge = true; + if (!try_merge) { + return true; + } + err = (default_hooks->merge == NULL || + default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, + committed, 0)); + did_merge = !err; + return err; +} + +static void +extent_hooks_prep(void) { + size_t sz; + + sz = sizeof(default_hooks); + assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, + NULL, 0), 0, "Unexpected mallctl() error"); +} diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in new file mode 100644 index 0000000..c46af5d --- /dev/null +++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in @@ -0,0 +1,173 @@ +#ifdef __cplusplus +extern "C" { +#endif + +#include +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#include +#include +#include +#include +#include +#include +#ifdef _WIN32 +# include "msvc_compat/strings.h" +#endif + +#ifdef _WIN32 +# include +# include "msvc_compat/windows_extra.h" +#else +# include +#endif + +#include "test/jemalloc_test_defs.h" + +#if defined(JEMALLOC_OSATOMIC) +# include +#endif + +#if defined(HAVE_ALTIVEC) && !defined(__APPLE__) +# include +#endif +#ifdef HAVE_SSE2 +# include +#endif + +/******************************************************************************/ +/* + * For unit tests, expose all public and private interfaces. + */ +#ifdef JEMALLOC_UNIT_TEST +# define JEMALLOC_JET +# define JEMALLOC_MANGLE +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" + +/******************************************************************************/ +/* + * For integration tests, expose the public jemalloc interfaces, but only + * expose the minimum necessary internal utility code (to avoid re-implementing + * essentially identical code within the test infrastructure). 
+ */ +#elif defined(JEMALLOC_INTEGRATION_TEST) || \ + defined(JEMALLOC_INTEGRATION_CPP_TEST) +# define JEMALLOC_MANGLE +# include "jemalloc/jemalloc@install_suffix@.h" +# include "jemalloc/internal/jemalloc_internal_defs.h" +# include "jemalloc/internal/jemalloc_internal_macros.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; + +# define JEMALLOC_N(n) @private_namespace@##n +# include "jemalloc/internal/private_namespace.h" +# include "jemalloc/internal/test_hooks.h" + +/* Hermetic headers. */ +# include "jemalloc/internal/assert.h" +# include "jemalloc/internal/malloc_io.h" +# include "jemalloc/internal/nstime.h" +# include "jemalloc/internal/util.h" + +/* Non-hermetic headers. */ +# include "jemalloc/internal/qr.h" +# include "jemalloc/internal/ql.h" + +/******************************************************************************/ +/* + * For stress tests, expose the public jemalloc interfaces with name mangling + * so that they can be tested as e.g. malloc() and free(). Also expose the + * public jemalloc interfaces with jet_ prefixes, so that stress tests can use + * a separate allocator for their internal data structures. + */ +#elif defined(JEMALLOC_STRESS_TEST) +# include "jemalloc/jemalloc@install_suffix@.h" + +# include "jemalloc/jemalloc_protos_jet.h" + +# define JEMALLOC_JET +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" +# include "jemalloc/internal/public_unnamespace.h" +# undef JEMALLOC_JET + +# include "jemalloc/jemalloc_rename.h" +# define JEMALLOC_MANGLE +# ifdef JEMALLOC_STRESS_TESTLIB +# include "jemalloc/jemalloc_mangle_jet.h" +# else +# include "jemalloc/jemalloc_mangle.h" +# endif + +/******************************************************************************/ +/* + * This header does dangerous things, the effects of which only test code + * should be subject to. + */ +#else +# error "This header cannot be included outside a testing context" +#endif + +/******************************************************************************/ +/* + * Common test utilities. + */ +#include "test/btalloc.h" +#include "test/math.h" +#include "test/mtx.h" +#include "test/mq.h" +#include "test/test.h" +#include "test/timer.h" +#include "test/thd.h" +#define MEXP 19937 +#include "test/SFMT.h" + +/******************************************************************************/ +/* + * Define always-enabled assertion macros, so that test assertions execute even + * if assertions are disabled in the library code. 
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented + +#define assert(e) do { \ + if (!(e)) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define not_implemented() do { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (!(e)) { \ + not_implemented(); \ + } \ +} while (0) + +#ifdef __cplusplus +} +#endif diff --git a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in new file mode 100644 index 0000000..5cc8532 --- /dev/null +++ b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in @@ -0,0 +1,9 @@ +#include "jemalloc/internal/jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +/* + * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its + * dependencies are notoriously unportable in practice. + */ +#undef HAVE_SSE2 +#undef HAVE_ALTIVEC diff --git a/deps/jemalloc/test/include/test/math.h b/deps/jemalloc/test/include/test/math.h new file mode 100644 index 0000000..efba086 --- /dev/null +++ b/deps/jemalloc/test/include/test/math.h @@ -0,0 +1,306 @@ +/* + * Compute the natural log of Gamma(x), accurate to 10 decimal places. + * + * This implementation is based on: + * + * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function + * [S14]. Communications of the ACM 9(9):684. + */ +static inline double +ln_gamma(double x) { + double f, z; + + assert(x > 0.0); + + if (x < 7.0) { + f = 1.0; + z = x; + while (z < 7.0) { + f *= z; + z += 1.0; + } + x = z; + f = -log(f); + } else { + f = 0.0; + } + + z = 1.0 / (x * x); + + return f + (x-0.5) * log(x) - x + 0.918938533204673 + + (((-0.000595238095238 * z + 0.000793650793651) * z - + 0.002777777777778) * z + 0.083333333333333) / x; +} + +/* + * Compute the incomplete Gamma ratio for [0..x], where p is the shape + * parameter, and ln_gamma_p is ln_gamma(p). + * + * This implementation is based on: + * + * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. + * Applied Statistics 19:285-287. + */ +static inline double +i_gamma(double x, double p, double ln_gamma_p) { + double acu, factor, oflo, gin, term, rn, a, b, an, dif; + double pn[6]; + unsigned i; + + assert(p > 0.0); + assert(x >= 0.0); + + if (x == 0.0) { + return 0.0; + } + + acu = 1.0e-10; + oflo = 1.0e30; + gin = 0.0; + factor = exp(p * log(x) - x - ln_gamma_p); + + if (x <= 1.0 || x < p) { + /* Calculation by series expansion. */ + gin = 1.0; + term = 1.0; + rn = p; + + while (true) { + rn += 1.0; + term *= x / rn; + gin += term; + if (term <= acu) { + gin *= factor / p; + return gin; + } + } + } else { + /* Calculation by continued fraction. 
*/ + a = 1.0 - p; + b = a + x + 1.0; + term = 0.0; + pn[0] = 1.0; + pn[1] = x; + pn[2] = x + 1.0; + pn[3] = x * b; + gin = pn[2] / pn[3]; + + while (true) { + a += 1.0; + b += 2.0; + term += 1.0; + an = a * term; + for (i = 0; i < 2; i++) { + pn[i+4] = b * pn[i+2] - an * pn[i]; + } + if (pn[5] != 0.0) { + rn = pn[4] / pn[5]; + dif = fabs(gin - rn); + if (dif <= acu && dif <= acu * rn) { + gin = 1.0 - factor * gin; + return gin; + } + gin = rn; + } + for (i = 0; i < 4; i++) { + pn[i] = pn[i+2]; + } + + if (fabs(pn[4]) >= oflo) { + for (i = 0; i < 4; i++) { + pn[i] /= oflo; + } + } + } + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the normal distribution, + * compute the limit on the definite integral from [-inf..z] that satisfies p, + * accurate to 16 decimal places. + * + * This implementation is based on: + * + * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal + * distribution. Applied Statistics 37(3):477-484. + */ +static inline double +pt_norm(double p) { + double q, r, ret; + + assert(p > 0.0 && p < 1.0); + + q = p - 0.5; + if (fabs(q) <= 0.425) { + /* p close to 1/2. */ + r = 0.180625 - q * q; + return q * (((((((2.5090809287301226727e3 * r + + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * + r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) + * r + 3.3871328727963666080e0) / + (((((((5.2264952788528545610e3 * r + + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * + r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) + * r + 1.0); + } else { + if (q < 0.0) { + r = p; + } else { + r = 1.0 - p; + } + assert(r > 0.0); + + r = sqrt(-log(r)); + if (r <= 5.0) { + /* p neither close to 1/2 nor 0 or 1. */ + r -= 1.6; + ret = ((((((((7.74545014278341407640e-4 * r + + 2.27238449892691845833e-2) * r + + 2.41780725177450611770e-1) * r + + 1.27045825245236838258e0) * r + + 3.64784832476320460504e0) * r + + 5.76949722146069140550e0) * r + + 4.63033784615654529590e0) * r + + 1.42343711074968357734e0) / + (((((((1.05075007164441684324e-9 * r + + 5.47593808499534494600e-4) * r + + 1.51986665636164571966e-2) + * r + 1.48103976427480074590e-1) * r + + 6.89767334985100004550e-1) * r + + 1.67638483018380384940e0) * r + + 2.05319162663775882187e0) * r + 1.0)); + } else { + /* p near 0 or 1. */ + r -= 5.0; + ret = ((((((((2.01033439929228813265e-7 * r + + 2.71155556874348757815e-5) * r + + 1.24266094738807843860e-3) * r + + 2.65321895265761230930e-2) * r + + 2.96560571828504891230e-1) * r + + 1.78482653991729133580e0) * r + + 5.46378491116411436990e0) * r + + 6.65790464350110377720e0) / + (((((((2.04426310338993978564e-15 * r + + 1.42151175831644588870e-7) * r + + 1.84631831751005468180e-5) * r + + 7.86869131145613259100e-4) * r + + 1.48753612908506148525e-2) * r + + 1.36929880922735805310e-1) * r + + 5.99832206555887937690e-1) + * r + 1.0)); + } + if (q < 0.0) { + ret = -ret; + } + return ret; + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution + * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute + * the upper limit on the definite integral from [0..z] that satisfies p, + * accurate to 12 decimal places. + * + * This implementation is based on: + * + * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of + * the Chi^2 distribution. Applied Statistics 24(3):385-388. + * + * Shea, B.L. 
(1991) Algorithm AS R85: A remark on AS 91: The percentage + * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. + */ +static inline double +pt_chi2(double p, double df, double ln_gamma_df_2) { + double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; + unsigned i; + + assert(p >= 0.0 && p < 1.0); + assert(df > 0.0); + + e = 5.0e-7; + aa = 0.6931471805; + + xx = 0.5 * df; + c = xx - 1.0; + + if (df < -1.24 * log(p)) { + /* Starting approximation for small Chi^2. */ + ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); + if (ch - e < 0.0) { + return ch; + } + } else { + if (df > 0.32) { + x = pt_norm(p); + /* + * Starting approximation using Wilson and Hilferty + * estimate. + */ + p1 = 0.222222 / df; + ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); + /* Starting approximation for p tending to 1. */ + if (ch > 2.2 * df + 6.0) { + ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + + ln_gamma_df_2); + } + } else { + ch = 0.4; + a = log(1.0 - p); + while (true) { + q = ch; + p1 = 1.0 + ch * (4.67 + ch); + p2 = ch * (6.73 + ch * (6.66 + ch)); + t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch + * (13.32 + 3.0 * ch)) / p2; + ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + + c * aa) * p2 / p1) / t; + if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { + break; + } + } + } + } + + for (i = 0; i < 20; i++) { + /* Calculation of seven-term Taylor series. */ + q = ch; + p1 = 0.5 * ch; + if (p1 < 0.0) { + return -1.0; + } + p2 = p - i_gamma(p1, xx, ln_gamma_df_2); + t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); + b = t / ch; + a = 0.5 * t - b * c; + s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + + 60.0 * a))))) / 420.0; + s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * + a)))) / 2520.0; + s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; + s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * + (889.0 + 1740.0 * a))) / 5040.0; + s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; + s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; + ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 + - b * (s4 - b * (s5 - b * s6)))))); + if (fabs(q / ch - 1.0) <= e) { + break; + } + } + + return ch; +} + +/* + * Given a value p in [0..1] and Gamma distribution shape and scale parameters, + * compute the upper limit on the definite integral from [0..z] that satisfies + * p. + */ +static inline double +pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { + return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale; +} diff --git a/deps/jemalloc/test/include/test/mq.h b/deps/jemalloc/test/include/test/mq.h new file mode 100644 index 0000000..af2c078 --- /dev/null +++ b/deps/jemalloc/test/include/test/mq.h @@ -0,0 +1,107 @@ +void mq_nanosleep(unsigned ns); + +/* + * Simple templated message queue implementation that relies on only mutexes for + * synchronization (which reduces portability issues). Given the following + * setup: + * + * typedef struct mq_msg_s mq_msg_t; + * struct mq_msg_s { + * mq_msg(mq_msg_t) link; + * [message data] + * }; + * mq_gen(, mq_, mq_t, mq_msg_t, link) + * + * The API is as follows: + * + * bool mq_init(mq_t *mq); + * void mq_fini(mq_t *mq); + * unsigned mq_count(mq_t *mq); + * mq_msg_t *mq_tryget(mq_t *mq); + * mq_msg_t *mq_get(mq_t *mq); + * void mq_put(mq_t *mq, mq_msg_t *msg); + * + * The message queue linkage embedded in each message is to be treated as + * externally opaque (no need to initialize or clean up externally). 
mq_fini() + * does not perform any cleanup of messages, since it knows nothing of their + * payloads. + */ +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) + +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +typedef struct { \ + mtx_t lock; \ + ql_head(a_mq_msg_type) msgs; \ + unsigned count; \ +} a_mq_type; \ +a_attr bool \ +a_prefix##init(a_mq_type *mq) { \ + \ + if (mtx_init(&mq->lock)) { \ + return true; \ + } \ + ql_new(&mq->msgs); \ + mq->count = 0; \ + return false; \ +} \ +a_attr void \ +a_prefix##fini(a_mq_type *mq) { \ + mtx_fini(&mq->lock); \ +} \ +a_attr unsigned \ +a_prefix##count(a_mq_type *mq) { \ + unsigned count; \ + \ + mtx_lock(&mq->lock); \ + count = mq->count; \ + mtx_unlock(&mq->lock); \ + return count; \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##tryget(a_mq_type *mq) { \ + a_mq_msg_type *msg; \ + \ + mtx_lock(&mq->lock); \ + msg = ql_first(&mq->msgs); \ + if (msg != NULL) { \ + ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ + mq->count--; \ + } \ + mtx_unlock(&mq->lock); \ + return msg; \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##get(a_mq_type *mq) { \ + a_mq_msg_type *msg; \ + unsigned ns; \ + \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) { \ + return msg; \ + } \ + \ + ns = 1; \ + while (true) { \ + mq_nanosleep(ns); \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) { \ + return msg; \ + } \ + if (ns < 1000*1000*1000) { \ + /* Double sleep time, up to max 1 second. */ \ + ns <<= 1; \ + if (ns > 1000*1000*1000) { \ + ns = 1000*1000*1000; \ + } \ + } \ + } \ +} \ +a_attr void \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ + \ + mtx_lock(&mq->lock); \ + ql_elm_new(msg, a_field); \ + ql_tail_insert(&mq->msgs, msg, a_field); \ + mq->count++; \ + mtx_unlock(&mq->lock); \ +} diff --git a/deps/jemalloc/test/include/test/mtx.h b/deps/jemalloc/test/include/test/mtx.h new file mode 100644 index 0000000..066a213 --- /dev/null +++ b/deps/jemalloc/test/include/test/mtx.h @@ -0,0 +1,21 @@ +/* + * mtx is a slightly simplified version of malloc_mutex. This code duplication + * is unfortunate, but there are allocator bootstrapping considerations that + * would leak into the test infrastructure if malloc_mutex were used directly + * in tests. + */ + +typedef struct { +#ifdef _WIN32 + CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#else + pthread_mutex_t lock; +#endif +} mtx_t; + +bool mtx_init(mtx_t *mtx); +void mtx_fini(mtx_t *mtx); +void mtx_lock(mtx_t *mtx); +void mtx_unlock(mtx_t *mtx); diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h new file mode 100644 index 0000000..fd0e526 --- /dev/null +++ b/deps/jemalloc/test/include/test/test.h @@ -0,0 +1,338 @@ +#define ASSERT_BUFSIZE 256 + +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ + t a_ = (a); \ + t b_ = (b); \ + if (!(a_ cmp b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) " #cmp " (%s) --> " \ + "%" pri " " #neg_cmp " %" pri ": ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_, b_); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ + ==, "p", __VA_ARGS__) +#define assert_ptr_null(a, ...) 
assert_cmp(void *, a, NULL, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ + ==, "p", __VA_ARGS__) + +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) + +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) + +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) + +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) + +#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ + !=, "ld", __VA_ARGS__) +#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ + ==, "ld", __VA_ARGS__) +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ + >=, "ld", __VA_ARGS__) +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ + >, "ld", __VA_ARGS__) +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ + <, "ld", __VA_ARGS__) +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ + <=, "ld", __VA_ARGS__) + +#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ + a, b, ==, !=, "lu", __VA_ARGS__) +#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ + a, b, !=, ==, "lu", __VA_ARGS__) +#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ + a, b, <, >=, "lu", __VA_ARGS__) +#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ + a, b, <=, >, "lu", __VA_ARGS__) +#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ + a, b, >=, <, "lu", __VA_ARGS__) +#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ + a, b, >, <=, "lu", __VA_ARGS__) + +#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ + !=, "qd", __VA_ARGS__) +#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ + ==, "qd", __VA_ARGS__) +#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ + >=, "qd", __VA_ARGS__) +#define assert_qd_le(a, b, ...) 
assert_cmp(long long, a, b, <=, \ + >, "qd", __VA_ARGS__) +#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ + <, "qd", __VA_ARGS__) +#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ + <=, "qd", __VA_ARGS__) + +#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ + a, b, ==, !=, "qu", __VA_ARGS__) +#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ + a, b, !=, ==, "qu", __VA_ARGS__) +#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <, >=, "qu", __VA_ARGS__) +#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <=, >, "qu", __VA_ARGS__) +#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >=, <, "qu", __VA_ARGS__) +#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >, <=, "qu", __VA_ARGS__) + +#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ + !=, "jd", __VA_ARGS__) +#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ + ==, "jd", __VA_ARGS__) +#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ + >=, "jd", __VA_ARGS__) +#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ + >, "jd", __VA_ARGS__) +#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ + <, "jd", __VA_ARGS__) +#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ + <=, "jd", __VA_ARGS__) + +#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ + !=, "ju", __VA_ARGS__) +#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ + ==, "ju", __VA_ARGS__) +#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ + >=, "ju", __VA_ARGS__) +#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ + >, "ju", __VA_ARGS__) +#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ + <, "ju", __VA_ARGS__) +#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ + <=, "ju", __VA_ARGS__) + +#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ + !=, "zd", __VA_ARGS__) +#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ + ==, "zd", __VA_ARGS__) +#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ + >=, "zd", __VA_ARGS__) +#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ + >, "zd", __VA_ARGS__) +#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ + <, "zd", __VA_ARGS__) +#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ + <=, "zd", __VA_ARGS__) + +#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ + !=, "zu", __VA_ARGS__) +#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ + ==, "zu", __VA_ARGS__) +#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ + >=, "zu", __VA_ARGS__) +#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ + >, "zu", __VA_ARGS__) +#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ + <, "zu", __VA_ARGS__) +#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ + <=, "zu", __VA_ARGS__) + +#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ + !=, FMTd32, __VA_ARGS__) +#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ + ==, FMTd32, __VA_ARGS__) +#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ + >=, FMTd32, __VA_ARGS__) +#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ + >, FMTd32, __VA_ARGS__) +#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ + <, FMTd32, __VA_ARGS__) +#define assert_d32_gt(a, b, ...) 
assert_cmp(int32_t, a, b, >, \ + <=, FMTd32, __VA_ARGS__) + +#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ + !=, FMTu32, __VA_ARGS__) +#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ + ==, FMTu32, __VA_ARGS__) +#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ + >=, FMTu32, __VA_ARGS__) +#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ + >, FMTu32, __VA_ARGS__) +#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ + <, FMTu32, __VA_ARGS__) +#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ + <=, FMTu32, __VA_ARGS__) + +#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ + !=, FMTd64, __VA_ARGS__) +#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ + ==, FMTd64, __VA_ARGS__) +#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ + >=, FMTd64, __VA_ARGS__) +#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ + >, FMTd64, __VA_ARGS__) +#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ + <, FMTd64, __VA_ARGS__) +#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ + <=, FMTd64, __VA_ARGS__) + +#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ + !=, FMTu64, __VA_ARGS__) +#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ + ==, FMTu64, __VA_ARGS__) +#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ + >=, FMTu64, __VA_ARGS__) +#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ + >, FMTu64, __VA_ARGS__) +#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ + <, FMTu64, __VA_ARGS__) +#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ + <=, FMTu64, __VA_ARGS__) + +#define assert_b_eq(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_b_ne(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) + +#define assert_str_eq(a, b, ...) do { \ + if (strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_str_ne(a, b, ...) 
do { \ + if (!strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_not_reached(...) do { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Unreachable code reached: ", \ + __func__, __FILE__, __LINE__); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ +} while (0) + +/* + * If this enum changes, corresponding changes in test/test.sh.in are also + * necessary. + */ +typedef enum { + test_status_pass = 0, + test_status_skip = 1, + test_status_fail = 2, + + test_status_count = 3 +} test_status_t; + +typedef void (test_t)(void); + +#define TEST_BEGIN(f) \ +static void \ +f(void) { \ + p_test_init(#f); + +#define TEST_END \ + goto label_test_end; \ +label_test_end: \ + p_test_fini(); \ +} + +#define test(...) \ + p_test(__VA_ARGS__, NULL) + +#define test_no_reentrancy(...) \ + p_test_no_reentrancy(__VA_ARGS__, NULL) + +#define test_no_malloc_init(...) \ + p_test_no_malloc_init(__VA_ARGS__, NULL) + +#define test_skip_if(e) do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", \ + __func__, __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ +} while (0) + +bool test_is_reentrant(); + +void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +/* For private use by macros. */ +test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_reentrancy(test_t *t, ...); +test_status_t p_test_no_malloc_init(test_t *t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(const char *prefix, const char *message); diff --git a/deps/jemalloc/test/include/test/thd.h b/deps/jemalloc/test/include/test/thd.h new file mode 100644 index 0000000..47a5126 --- /dev/null +++ b/deps/jemalloc/test/include/test/thd.h @@ -0,0 +1,9 @@ +/* Abstraction layer for threading in tests. */ +#ifdef _WIN32 +typedef HANDLE thd_t; +#else +typedef pthread_t thd_t; +#endif + +void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); +void thd_join(thd_t thd, void **ret); diff --git a/deps/jemalloc/test/include/test/timer.h b/deps/jemalloc/test/include/test/timer.h new file mode 100644 index 0000000..ace6191 --- /dev/null +++ b/deps/jemalloc/test/include/test/timer.h @@ -0,0 +1,11 @@ +/* Simple timer, for use in benchmark reporting. 
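+ *
+ * A hedged usage sketch (editorial illustration, not part of the upstream
+ * header), based only on the declarations below:
+ *
+ *   timedelta_t timer;
+ *   timer_start(&timer);
+ *   ... code being benchmarked ...
+ *   timer_stop(&timer);
+ *   uint64_t usec = timer_usec(&timer);  // elapsed time in microseconds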
*/ + +typedef struct { + nstime_t t0; + nstime_t t1; +} timedelta_t; + +void timer_start(timedelta_t *timer); +void timer_stop(timedelta_t *timer); +uint64_t timer_usec(const timedelta_t *timer); +void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c new file mode 100644 index 0000000..222164d --- /dev/null +++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c @@ -0,0 +1,66 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +static bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) { + unsigned thread_ind = (unsigned)(uintptr_t)arg; + unsigned arena_ind; + void *p; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); + + if (thread_ind % 4 != 3) { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + const char *dss_precs[] = {"disabled", "primary", "secondary"}; + unsigned prec_ind = thread_ind % + (sizeof(dss_precs)/sizeof(char*)); + const char *dss = dss_precs[prec_ind]; + int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Error in mallctlnametomib()"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, + sizeof(const char *)), expected_err, + "Error in mallctlbymib()"); + } + + p = mallocx(1, MALLOCX_ARENA(arena_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + return NULL; +} + +TEST_BEGIN(test_MALLOCX_ARENA) { + thd_t thds[NTHREADS]; + unsigned i; + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)(uintptr_t)i); + } + + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } +} +TEST_END + +int +main(void) { + return test( + test_MALLOCX_ARENA); +} diff --git a/deps/jemalloc/test/integration/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c new file mode 100644 index 0000000..4375b17 --- /dev/null +++ b/deps/jemalloc/test/integration/aligned_alloc.c @@ -0,0 +1,157 @@ +#include "test/jemalloc_test.h" + +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) { + size_t alignment; + void *p; + + alignment = 0; + set_errno(0); + p = aligned_alloc(alignment, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", alignment); + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + set_errno(0); + p = aligned_alloc(alignment + 1, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. 
+ */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + +TEST_BEGIN(test_oom_errors) { + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 + size_t alignment, size, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 1; + size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + ps[i] = aligned_alloc(alignment, size); + if (ps[i] == NULL) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + purge(); + } +#undef NITER +} +TEST_END + +TEST_BEGIN(test_zero_alloc) { + void *res = aligned_alloc(8, 0); + assert(res); + size_t usable = malloc_usable_size(res); + assert(usable > 0); + free(res); +} +TEST_END + +int +main(void) { + return test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size, + test_zero_alloc); +} diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c new file mode 100644 index 0000000..1425fd0 --- /dev/null +++ b/deps/jemalloc/test/integration/allocated.c @@ -0,0 +1,124 @@ +#include "test/jemalloc_test.h" + +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) { + int err; + void *p; + uint64_t a0, a1, d0, d1; + uint64_t *ap0, *ap1, *dp0, *dp1; + size_t sz, usize; + + sz = sizeof(a0); + if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { + if (err == ENOENT) { + goto label_ENOENT; + } + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(ap0); + if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { + if (err == ENOENT) { + goto label_ENOENT; + } + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*ap0, a0, + "\"thread.allocatedp\" should provide a pointer to internal " + "storage"); + + sz = sizeof(d0); + if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { + if 
(err == ENOENT) { + goto label_ENOENT; + } + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(dp0); + if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, + 0))) { + if (err == ENOENT) { + goto label_ENOENT; + } + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*dp0, d0, + "\"thread.deallocatedp\" should provide a pointer to internal " + "storage"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() error"); + + sz = sizeof(a1); + mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); + sz = sizeof(ap1); + mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); + assert_u64_eq(*ap1, a1, + "Dereferenced \"thread.allocatedp\" value should equal " + "\"thread.allocated\" value"); + assert_ptr_eq(ap0, ap1, + "Pointer returned by \"thread.allocatedp\" should not change"); + + usize = malloc_usable_size(p); + assert_u64_le(a0 + usize, a1, + "Allocated memory counter should increase by at least the amount " + "explicitly allocated"); + + free(p); + + sz = sizeof(d1); + mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); + sz = sizeof(dp1); + mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); + assert_u64_eq(*dp1, d1, + "Dereferenced \"thread.deallocatedp\" value should equal " + "\"thread.deallocated\" value"); + assert_ptr_eq(dp0, dp1, + "Pointer returned by \"thread.deallocatedp\" should not change"); + + assert_u64_le(d0 + usize, d1, + "Deallocated memory counter should increase by at least the amount " + "explicitly deallocated"); + + return NULL; +label_ENOENT: + assert_false(config_stats, + "ENOENT should only be returned if stats are disabled"); + test_skip("\"thread.allocated\" mallctl not available"); + return NULL; +} + +TEST_BEGIN(test_main_thread) { + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) { + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) { + /* Run tests multiple times to check for bad interactions. */ + return test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread); +} diff --git a/deps/jemalloc/test/integration/cpp/basic.cpp b/deps/jemalloc/test/integration/cpp/basic.cpp new file mode 100644 index 0000000..65890ec --- /dev/null +++ b/deps/jemalloc/test/integration/cpp/basic.cpp @@ -0,0 +1,25 @@ +#include +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_basic) { + auto foo = new long(4); + assert_ptr_not_null(foo, "Unexpected new[] failure"); + delete foo; + // Test nullptr handling. + foo = nullptr; + delete foo; + + auto bar = new long; + assert_ptr_not_null(bar, "Unexpected new failure"); + delete bar; + // Test nullptr handling. 
+ bar = nullptr; + delete bar; +} +TEST_END + +int +main() { + return test( + test_basic); +} diff --git a/deps/jemalloc/test/integration/extent.c b/deps/jemalloc/test/integration/extent.c new file mode 100644 index 0000000..b5db087 --- /dev/null +++ b/deps/jemalloc/test/integration/extent.c @@ -0,0 +1,248 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static bool +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; +} + +static void +test_extent_body(unsigned arena_ind) { + void *p; + size_t large0, large1, large2, sz; + size_t purge_mib[3]; + size_t purge_miblen; + int flags; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + /* Get large size classes. */ + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.0.size failure"); + assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.1.size failure"); + assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.2.size failure"); + + /* Test dalloc/decommit/purge cascade. */ + purge_miblen = sizeof(purge_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), + 0, "Unexpected mallctlnametomib() failure"); + purge_mib[1] = (size_t)arena_ind; + called_alloc = false; + try_alloc = true; + try_dalloc = false; + try_decommit = false; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_true(called_alloc, "Expected alloc call"); + called_dalloc = false; + called_decommit = false; + did_purge_lazy = false; + did_purge_forced = false; + called_split = false; + xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_a) { + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(did_purge_lazy || did_purge_forced, + "Expected purge"); + } + assert_true(called_split, "Expected split call"); + dallocx(p, flags); + try_dalloc = true; + + /* Test decommit/commit and observe split/merge. */ + try_dalloc = false; + try_decommit = true; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_decommit = false; + did_commit = false; + called_split = false; + did_split = false; + did_merge = false; + xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_b) { + assert_true(did_split, "Expected split"); + } + xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); + if (did_split) { + assert_b_eq(did_decommit, did_commit, + "Expected decommit/commit match"); + } + if (xallocx_success_b && xallocx_success_c) { + assert_true(did_merge, "Expected merge"); + } + dallocx(p, flags); + try_dalloc = true; + try_decommit = false; + + /* Make sure non-large allocation succeeds. 
*/ + p = mallocx(42, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, flags); +} + +static void +test_manual_hook_auto_arena(void) { + unsigned narenas; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; + extent_hooks_t *new_hooks, *old_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + /* Get number of auto arenas. */ + assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + if (narenas == 1) { + return; + } + + /* Install custom extent hooks on arena 1 (might not be initialized). */ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = 1; + old_size = sizeof(extent_hooks_t *); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected extent_hooks error"); + static bool auto_arena_created = false; + if (old_hooks != &hooks) { + assert_b_eq(auto_arena_created, false, + "Expected auto arena 1 created only once."); + auto_arena_created = true; + } +} + +static void +test_manual_hook_body(void) { + unsigned arena_ind; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; + extent_hooks_t *new_hooks, *old_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Install custom extent hooks. */ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = (size_t)arena_ind; + old_size = sizeof(extent_hooks_t *); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->alloc, extent_alloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->commit, extent_commit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->decommit, extent_decommit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->split, extent_split_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->merge, extent_merge_hook, + "Unexpected extent_hooks error"); + + if (!check_background_thread_enabled()) { + test_extent_body(arena_ind); + } + + /* Restore extent hooks. 
*/ + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, + (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->alloc, default_hooks->alloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->commit, default_hooks->commit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->decommit, default_hooks->decommit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->split, default_hooks->split, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->merge, default_hooks->merge, + "Unexpected extent_hooks error"); +} + +TEST_BEGIN(test_extent_manual_hook) { + test_manual_hook_auto_arena(); + test_manual_hook_body(); + + /* Test failure paths. */ + try_split = false; + test_manual_hook_body(); + try_merge = false; + test_manual_hook_body(); + try_purge_lazy = false; + try_purge_forced = false; + test_manual_hook_body(); + + try_split = try_merge = try_purge_lazy = try_purge_forced = true; +} +TEST_END + +TEST_BEGIN(test_extent_auto_hook) { + unsigned arena_ind; + size_t new_size, sz; + extent_hooks_t *new_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); + + test_skip_if(check_background_thread_enabled()); + test_extent_body(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_extent_manual_hook, + test_extent_auto_hook); +} diff --git a/deps/jemalloc/test/integration/extent.sh b/deps/jemalloc/test/integration/extent.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/deps/jemalloc/test/integration/extent.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/deps/jemalloc/test/integration/malloc.c b/deps/jemalloc/test/integration/malloc.c new file mode 100644 index 0000000..8b33bc8 --- /dev/null +++ b/deps/jemalloc/test/integration/malloc.c @@ -0,0 +1,16 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_zero_alloc) { + void *res = malloc(0); + assert(res); + size_t usable = malloc_usable_size(res); + assert(usable > 0); + free(res); +} +TEST_END + +int +main(void) { + return test( + test_zero_alloc); +} diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c new file mode 100644 index 0000000..645d4db --- /dev/null +++ b/deps/jemalloc/test/integration/mallocx.c @@ -0,0 +1,274 @@ +#include "test/jemalloc_test.h" + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) 
failure", cmd); + + return ret; +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + +TEST_BEGIN(test_overflow) { + size_t largemax; + + largemax = get_large_size(get_nlarge()-1); + + assert_ptr_null(mallocx(largemax+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); + + assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(mallocx(SIZE_T_MAX, 0), + "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); +} +TEST_END + +static void * +remote_alloc(void *arg) { + unsigned arena; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + size_t large_sz; + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena) + | MALLOCX_TCACHE_NONE); + void **ret = (void **)arg; + *ret = ptr; + + return NULL; +} + +TEST_BEGIN(test_remote_free) { + thd_t thd; + void *ret; + thd_create(&thd, remote_alloc, (void *)&ret); + thd_join(thd, NULL); + assert_ptr_not_null(ret, "Unexpected mallocx failure"); + + /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */ + dallocx(ret, 0); + mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); +} +TEST_END + +TEST_BEGIN(test_oom) { + size_t largemax; + bool oom; + void *ptrs[3]; + unsigned i; + + /* + * It should be impossible to allocate three objects that each consume + * nearly half the virtual address space. 
+ */ + largemax = get_large_size(get_nlarge()-1); + oom = false; + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0)); + if (ptrs[i] == NULL) { + oom = true; + } + } + assert_true(oom, + "Expected OOM during series of calls to mallocx(size=%zu, 0)", + largemax); + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + if (ptrs[i] != NULL) { + dallocx(ptrs[i], 0); + } + } + purge(); + +#if LG_SIZEOF_PTR == 3 + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x8000000000000000ULL)), + "Expected OOM for mallocx()"); + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x80000000)), + "Expected OOM for mallocx()"); +#else + assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), + "Expected OOM for mallocx()"); +#endif +} +TEST_END + +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + +TEST_BEGIN(test_basic) { +#define MAXSZ (((size_t)1) << 23) + size_t sz; + + for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { + size_t nsz, rsz; + void *p; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", + nsz); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); + purge(); + } +#undef MAXSZ +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) { + const char *percpu_arena; + size_t sz = sizeof(percpu_arena); + + if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || + strcmp(percpu_arena, "disabled") != 0) { + test_skip("test_alignment_and_size skipped: " + "not working with percpu arena."); + }; +#define MAXALIGN (((size_t)1) << 23) +#define NITER 4 + size_t nsz, rsz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO | MALLOCX_ARENA(0)); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO | MALLOCX_ARENA(0)); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; 
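Both test_basic and test_alignment_and_size above lean on the contract that nallocx() reports, without allocating, the usable size mallocx() will return for a given size/flags pair, and that sallocx() reports that same value for the live allocation. A minimal round trip outside the test harness; the request size and alignment here are arbitrary, and the header and unprefixed symbol names are assumed as before:

#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    size_t req = 100;
    int flags = MALLOCX_ALIGN(64);

    /* Predicted usable size for this request/flags combination. */
    size_t predicted = nallocx(req, flags);
    assert(predicted >= req);

    /* The real allocation must land in the same size class. */
    void *p = mallocx(req, flags);
    assert(p != NULL);
    size_t usable = sallocx(p, 0);
    assert(usable == predicted);

    printf("requested %zu, usable %zu\n", req, usable);
    dallocx(p, 0);
    return 0;
}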
+ } + } + } + purge(); + } +#undef MAXALIGN +#undef NITER +} +TEST_END + +int +main(void) { + return test( + test_overflow, + test_oom, + test_remote_free, + test_basic, + test_alignment_and_size); +} diff --git a/deps/jemalloc/test/integration/mallocx.sh b/deps/jemalloc/test/integration/mallocx.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/deps/jemalloc/test/integration/mallocx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c new file mode 100644 index 0000000..748ebb6 --- /dev/null +++ b/deps/jemalloc/test/integration/overflow.c @@ -0,0 +1,59 @@ +#include "test/jemalloc_test.h" + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + +TEST_BEGIN(test_overflow) { + unsigned nlextents; + size_t mib[4]; + size_t sz, miblen, max_size_class; + void *p; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + mib[2] = nlextents - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); + + assert_ptr_null(malloc(max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(malloc(SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + assert_ptr_null(calloc(1, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(calloc(1, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() OOM"); + assert_ptr_null(realloc(p, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(realloc(p, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + free(p); +} +TEST_END + +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + +int +main(void) { + return test( + test_overflow); +} diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c new file mode 100644 index 0000000..d992260 --- /dev/null +++ b/deps/jemalloc/test/integration/posix_memalign.c @@ -0,0 +1,128 @@ +#include "test/jemalloc_test.h" + +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. 
+ */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) { + size_t alignment; + void *p; + + for (alignment = 0; alignment < sizeof(void *); alignment++) { + assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, + "Expected error for invalid alignment %zu", + alignment); + } + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + +TEST_BEGIN(test_oom_errors) { + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 + size_t alignment, size, total; + unsigned i; + int err; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 0; + size < 3 * alignment && size < (1U << 31); + size += ((size == 0) ? 1 : + (alignment >> (LG_SIZEOF_PTR-1)) - 1)) { + for (i = 0; i < NITER; i++) { + err = posix_memalign(&ps[i], + alignment, size); + if (err) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + purge(); + } +#undef NITER +} +TEST_END + +int +main(void) { + return test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size); +} diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c new file mode 100644 index 0000000..08ed08d --- /dev/null +++ b/deps/jemalloc/test/integration/rallocx.c @@ -0,0 +1,258 @@ +#include "test/jemalloc_test.h" + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) 
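posix_memalign.c above probes the standard alignment rules: the alignment must be a power of two and at least sizeof(void *), otherwise EINVAL is returned and nothing is allocated. A tiny standalone illustration using only the standard interface; the particular alignments and size are arbitrary:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void) {
    void *p;

    /* 3 is neither a power of two nor a multiple of sizeof(void *). */
    int err = posix_memalign(&p, 3, 64);
    printf("alignment 3 -> %s\n", err == EINVAL ? "EINVAL" : "ok");

    /* A valid power-of-two alignment succeeds and must be honored. */
    err = posix_memalign(&p, 64, 1024);
    if (err == 0) {
        printf("alignment 64 -> %p\n", p);
        free(p);
    }
    return 0;
}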
failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +TEST_BEGIN(test_grow_and_shrink) { + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 1024 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + szs[0] = sallocx(p, 0); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = rallocx(p, szs[j-1]+1, 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + szs[j] = sallocx(q, 0); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to be at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = rallocx(p, szs[j-1], 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j], szs[j-1]); + tsz = sallocx(q, 0); + assert_zu_eq(tsz, szs[j-1], + "Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + dallocx(p, 0); +#undef MAXSZ +#undef NSZS +#undef NCYCLES +} +TEST_END + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { + bool ret = false; + const uint8_t *buf = (const uint8_t *)p; + size_t i; + + for (i = 0; i < len; i++) { + uint8_t b = buf[offset+i]; + if (b != c) { + test_fail("Allocation at %p (len=%zu) contains %#x " + "rather than %#x at offset %zu", p, len, b, c, + offset+i); + ret = true; + } + } + + return ret; +} + +TEST_BEGIN(test_zero) { + void *p, *q; + size_t psz, qsz, i, j; + size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; +#define FILL_BYTE 0xaaU +#define RANGE 2048 + + for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { + size_t start_size = start_sizes[i]; + p = mallocx(start_size, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + psz = sallocx(p, 0); + + assert_false(validate_fill(p, 0, 0, psz), + "Expected zeroed memory"); + memset(p, FILL_BYTE, psz); + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + + for (j = 1; j < RANGE; j++) { + q = rallocx(p, start_size+j, MALLOCX_ZERO); + assert_ptr_not_null(q, "Unexpected rallocx() error"); + qsz = sallocx(q, 0); + if (q != p || qsz != psz) { + assert_false(validate_fill(q, FILL_BYTE, 0, + psz), "Expected filled memory"); + assert_false(validate_fill(q, 0, psz, qsz-psz), + "Expected zeroed memory"); + } + if (psz != qsz) { + memset((void *)((uintptr_t)q+psz), FILL_BYTE, + qsz-psz); + psz = qsz; + } + p = q; + } + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + dallocx(p, 0); + } +#undef FILL_BYTE +} +TEST_END + +TEST_BEGIN(test_align) { + void *p, *q; + size_t align; +#define MAX_ALIGN (ZU(1) << 25) + + align = ZU(1); + p = mallocx(1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { + q = rallocx(p, 1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(q, + "Unexpected rallocx() error for align=%zu", align); + assert_ptr_null( + (void *)((uintptr_t)q & (align-1)), + "%p inadequately aligned for align=%zu", + q, align); + p = q; + } + dallocx(p, 0); +#undef MAX_ALIGN +} +TEST_END + +TEST_BEGIN(test_lg_align_and_zero) { + void *p, *q; + unsigned lg_align; + size_t sz; +#define MAX_LG_ALIGN 25 +#define 
MAX_VALIDATE (ZU(1) << 22) + + lg_align = 0; + p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { + q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(q, + "Unexpected rallocx() error for lg_align=%u", lg_align); + assert_ptr_null( + (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), + "%p inadequately aligned for lg_align=%u", q, lg_align); + sz = sallocx(q, 0); + if ((sz << 1) <= MAX_VALIDATE) { + assert_false(validate_fill(q, 0, 0, sz), + "Expected zeroed memory"); + } else { + assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), + "Expected zeroed memory"); + assert_false(validate_fill( + (void *)((uintptr_t)q+sz-MAX_VALIDATE), + 0, 0, MAX_VALIDATE), "Expected zeroed memory"); + } + p = q; + } + dallocx(p, 0); +#undef MAX_VALIDATE +#undef MAX_LG_ALIGN +} +TEST_END + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. + */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + +TEST_BEGIN(test_overflow) { + size_t largemax; + void *p; + + largemax = get_large_size(get_nlarge()-1); + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_ptr_null(rallocx(p, largemax+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); + + assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); + + dallocx(p, 0); +} +TEST_END + +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + +int +main(void) { + return test( + test_grow_and_shrink, + test_zero, + test_align, + test_lg_align_and_zero, + test_overflow); +} diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c new file mode 100644 index 0000000..ca01448 --- /dev/null +++ b/deps/jemalloc/test/integration/sdallocx.c @@ -0,0 +1,55 @@ +#include "test/jemalloc_test.h" + +#define MAXALIGN (((size_t)1) << 22) +#define NITER 3 + +TEST_BEGIN(test_basic) { + void *ptr = mallocx(64, 0); + sdallocx(ptr, 64, 0); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) { + size_t nsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + total += nsz; + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + sdallocx(ps[i], sz, + MALLOCX_ALIGN(alignment)); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_basic, + test_alignment_and_size); +} diff 
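sdallocx.c just above pairs each mallocx() with a size-annotated free. The sketch below isolates that contract: the size and flags passed to sdallocx() must describe the size class the allocation came from, which the original request size plus the original alignment flag satisfies. Sizes are arbitrary; the jemalloc header and unprefixed symbols are assumptions, as in the earlier sketches:

#include <assert.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    size_t sz = 200;
    int flags = MALLOCX_ALIGN(64);

    void *p = mallocx(sz, flags);
    assert(p != NULL);

    /*
     * Sized deallocation: passing the original request size and the same
     * alignment flag resolves to the allocation's size class, which is
     * what sdallocx() requires.
     */
    sdallocx(p, sz, flags);
    return 0;
}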
--git a/deps/jemalloc/test/integration/slab_sizes.c b/deps/jemalloc/test/integration/slab_sizes.c new file mode 100644 index 0000000..af250c3 --- /dev/null +++ b/deps/jemalloc/test/integration/slab_sizes.c @@ -0,0 +1,80 @@ +#include "test/jemalloc_test.h" + +/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */ + +TEST_BEGIN(test_slab_sizes) { + unsigned nbins; + size_t page; + size_t sizemib[4]; + size_t slabmib[4]; + size_t len; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, + "nbins mallctl failure"); + + len = sizeof(page); + assert_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0, + "page mallctl failure"); + + len = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0, + "bin size mallctlnametomib failure"); + + len = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len), + 0, "slab size mallctlnametomib failure"); + + size_t biggest_slab_seen = 0; + + for (unsigned i = 0; i < nbins; i++) { + size_t bin_size; + size_t slab_size; + len = sizeof(size_t); + sizemib[2] = i; + slabmib[2] = i; + assert_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len, + NULL, 0), 0, "bin size mallctlbymib failure"); + + len = sizeof(size_t); + assert_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len, + NULL, 0), 0, "slab size mallctlbymib failure"); + + if (bin_size < 100) { + /* + * Then we should be as close to 17 as possible. Since + * not all page sizes are valid (because of bitmap + * limitations on the number of items in a slab), we + * should at least make sure that the number of pages + * goes up. + */ + assert_zu_ge(slab_size, biggest_slab_seen, + "Slab sizes should go up"); + biggest_slab_seen = slab_size; + } else if ( + (100 <= bin_size && bin_size < 128) + || (128 < bin_size && bin_size <= 200)) { + assert_zu_eq(slab_size, page, + "Forced-small slabs should be small"); + } else if (bin_size == 128) { + assert_zu_eq(slab_size, 2 * page, + "Forced-2-page slab should be 2 pages"); + } else if (200 < bin_size && bin_size <= 4096) { + assert_zu_ge(slab_size, biggest_slab_seen, + "Slab sizes should go up"); + biggest_slab_seen = slab_size; + } + } + /* + * For any reasonable configuration, 17 pages should be a valid slab + * size for 4096-byte items. + */ + assert_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target"); +} +TEST_END + +int +main(void) { + return test( + test_slab_sizes); +} diff --git a/deps/jemalloc/test/integration/slab_sizes.sh b/deps/jemalloc/test/integration/slab_sizes.sh new file mode 100644 index 0000000..07e3db8 --- /dev/null +++ b/deps/jemalloc/test/integration/slab_sizes.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +# Some screwy-looking slab sizes. 
+export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2" diff --git a/deps/jemalloc/test/integration/smallocx.c b/deps/jemalloc/test/integration/smallocx.c new file mode 100644 index 0000000..2486752 --- /dev/null +++ b/deps/jemalloc/test/integration/smallocx.c @@ -0,0 +1,312 @@ +#include "test/jemalloc_test.h" +#include "jemalloc/jemalloc_macros.h" + +#define STR_HELPER(x) #x +#define STR(x) STR_HELPER(x) + +#ifndef JEMALLOC_VERSION_GID_IDENT + #error "JEMALLOC_VERSION_GID_IDENT not defined" +#endif + +#define JOIN(x, y) x ## y +#define JOIN2(x, y) JOIN(x, y) +#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT) + +typedef struct { + void *ptr; + size_t size; +} smallocx_return_t; + +extern smallocx_return_t +smallocx(size_t size, int flags); + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +/* + * GCC "-Walloc-size-larger-than" warning detects when one of the memory + * allocation functions is called with a size larger than the maximum size that + * they support. Here we want to explicitly test that the allocation functions + * do indeed fail properly when this is the case, which triggers the warning. + * Therefore we disable the warning for these tests. 
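slab_sizes.sh configures the allocator through the MALLOC_CONF environment variable; the same option string can instead be baked into the application through the malloc_conf symbol that jemalloc reads at startup (the symbol name assumes an unprefixed build and does not appear in the patch). A sketch mirroring the setting above, which then reads back one of the resulting slab sizes:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Compile-time counterpart of the MALLOC_CONF export in slab_sizes.sh:
 * 17-page slabs for bins up to 4096 bytes, 1-page slabs for the
 * 100..200-byte bins, and a 2-page slab for the 128-byte bin.
 */
const char *malloc_conf = "slab_sizes:1-4096:17|100-200:1|128-128:2";

int
main(void) {
    size_t slab_size;
    size_t len = sizeof(slab_size);

    /* Read back the slab size chosen for bin 0. */
    if (mallctl("arenas.bin.0.slab_size", &slab_size, &len, NULL, 0) != 0) {
        fprintf(stderr, "mallctl failed\n");
        return 1;
    }
    printf("bin 0 slab size: %zu bytes\n", slab_size);
    return 0;
}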
+ */ +JEMALLOC_DIAGNOSTIC_PUSH +JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN + +TEST_BEGIN(test_overflow) { + size_t largemax; + + largemax = get_large_size(get_nlarge()-1); + + assert_ptr_null(smallocx(largemax+1, 0).ptr, + "Expected OOM for smallocx(size=%#zx, 0)", largemax+1); + + assert_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr, + "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(smallocx(SIZE_T_MAX, 0).ptr, + "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr, + "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); +} +TEST_END + +static void * +remote_alloc(void *arg) { + unsigned arena; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + size_t large_sz; + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + smallocx_return_t r + = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); + void *ptr = r.ptr; + assert_zu_eq(r.size, + nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE), + "Expected smalloc(size,flags).size == nallocx(size,flags)"); + void **ret = (void **)arg; + *ret = ptr; + + return NULL; +} + +TEST_BEGIN(test_remote_free) { + thd_t thd; + void *ret; + thd_create(&thd, remote_alloc, (void *)&ret); + thd_join(thd, NULL); + assert_ptr_not_null(ret, "Unexpected smallocx failure"); + + /* Avoid TCACHE_NONE to explicitly test tcache_flush(). */ + dallocx(ret, 0); + mallctl("thread.tcache.flush", NULL, NULL, NULL, 0); +} +TEST_END + +TEST_BEGIN(test_oom) { + size_t largemax; + bool oom; + void *ptrs[3]; + unsigned i; + + /* + * It should be impossible to allocate three objects that each consume + * nearly half the virtual address space. 
+ */ + largemax = get_large_size(get_nlarge()-1); + oom = false; + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + ptrs[i] = smallocx(largemax, 0).ptr; + if (ptrs[i] == NULL) { + oom = true; + } + } + assert_true(oom, + "Expected OOM during series of calls to smallocx(size=%zu, 0)", + largemax); + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + if (ptrs[i] != NULL) { + dallocx(ptrs[i], 0); + } + } + purge(); + +#if LG_SIZEOF_PTR == 3 + assert_ptr_null(smallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x8000000000000000ULL)).ptr, + "Expected OOM for smallocx()"); + assert_ptr_null(smallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x80000000)).ptr, + "Expected OOM for smallocx()"); +#else + assert_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr, + "Expected OOM for smallocx()"); +#endif +} +TEST_END + +/* Re-enable the "-Walloc-size-larger-than=" warning */ +JEMALLOC_DIAGNOSTIC_POP + +TEST_BEGIN(test_basic) { +#define MAXSZ (((size_t)1) << 23) + size_t sz; + + for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { + smallocx_return_t ret; + size_t nsz, rsz, smz; + void *p; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + ret = smallocx(sz, 0); + p = ret.ptr; + smz = ret.size; + assert_ptr_not_null(p, + "Unexpected smallocx(size=%zx, flags=0) error", sz); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch"); + dallocx(p, 0); + + ret = smallocx(sz, 0); + p = ret.ptr; + smz = ret.size; + assert_ptr_not_null(p, + "Unexpected smallocx(size=%zx, flags=0) error", sz); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + assert_zu_ne(smz, 0, "Unexpected smallocx() error"); + ret = smallocx(sz, MALLOCX_ZERO); + p = ret.ptr; + assert_ptr_not_null(p, + "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error", + nsz); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + assert_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch"); + dallocx(p, 0); + purge(); + } +#undef MAXSZ +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) { + const char *percpu_arena; + size_t sz = sizeof(percpu_arena); + + if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || + strcmp(percpu_arena, "disabled") != 0) { + test_skip("test_alignment_and_size skipped: " + "not working with percpu arena."); + }; +#define MAXALIGN (((size_t)1) << 23) +#define NITER 4 + size_t nsz, rsz, smz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + smallocx_return_t ret + = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); + ps[i] = ret.ptr; + assert_ptr_not_null(ps[i], + "smallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + smz = ret.size; + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, 
size=%zu", alignment, sz); + assert_zu_eq(nsz, smz, + "nallocx()/smallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + purge(); + } +#undef MAXALIGN +#undef NITER +} +TEST_END + +int +main(void) { + return test( + test_overflow, + test_oom, + test_remote_free, + test_basic, + test_alignment_and_size); +} diff --git a/deps/jemalloc/test/integration/smallocx.sh b/deps/jemalloc/test/integration/smallocx.sh new file mode 100644 index 0000000..d07f10f --- /dev/null +++ b/deps/jemalloc/test/integration/smallocx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c new file mode 100644 index 0000000..1e5ec05 --- /dev/null +++ b/deps/jemalloc/test/integration/thread_arena.c @@ -0,0 +1,86 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +void * +thd_start(void *arg) { + unsigned main_arena_ind = *(unsigned *)arg; + void *p; + unsigned arena_ind; + size_t size; + int err; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + free(p); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, + (void *)&main_arena_ind, sizeof(main_arena_ind)))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, + 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + assert_u_eq(arena_ind, main_arena_ind, + "Arena index should be same as for main thread"); + + return NULL; +} + +static void +mallctl_failure(int err) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); +} + +TEST_BEGIN(test_thread_arena) { + void *p; + int err; + thd_t thds[NTHREADS]; + unsigned i; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + + unsigned arena_ind, old_arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); + + size_t size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, + (void *)&arena_ind, sizeof(arena_ind))) != 0) { + mallctl_failure(err); + } + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)&arena_ind); + } + + for (i = 0; i < NTHREADS; i++) { + intptr_t join_ret; + thd_join(thds[i], (void *)&join_ret); + assert_zd_eq(join_ret, 0, "Unexpected thread join error"); + } + free(p); +} +TEST_END + +int +main(void) { + return test( + test_thread_arena); +} diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c new file mode 100644 index 0000000..95c9acc --- /dev/null +++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c @@ -0,0 +1,87 @@ +#include "test/jemalloc_test.h" + +void * +thd_start(void *arg) { + bool e0, e1; + size_t sz = sizeof(bool); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, + 0), 0, "Unexpected 
mallctl failure"); + + if (e0) { + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + } + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + return NULL; +} + +TEST_BEGIN(test_main_thread) { + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) { + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) { + /* Run tests multiple times to check for bad interactions. */ + return test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread); +} diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c new file mode 100644 index 0000000..cd0ca04 --- /dev/null +++ b/deps/jemalloc/test/integration/xallocx.c @@ -0,0 +1,384 @@ +#include "test/jemalloc_test.h" + +/* + * Use a separate arena for xallocx() extension/contraction tests so that + * internal allocation e.g. by heap profiling can't interpose allocations where + * xallocx() would ordinarily be able to extend. 
+ */ +static unsigned +arena_ind(void) { + static unsigned ind = 0; + + if (ind == 0) { + size_t sz = sizeof(ind); + assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, + 0), 0, "Unexpected mallctl failure creating arena"); + } + + return ind; +} + +TEST_BEGIN(test_same_size) { + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) { + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, sz-42, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_no_move_fail) { + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz + 5, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +TEST_BEGIN(test_size) { + size_t small0, largemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + largemax = get_large_size(get_nlarge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test smallest supported size. */ + assert_zu_eq(xallocx(p, 1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test largest supported size. */ + assert_zu_le(xallocx(p, largemax, 0, 0), largemax, + "Unexpected xallocx() behavior"); + + /* Test size overflow. */ + assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_size_extra_overflow) { + size_t small0, largemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + largemax = get_large_size(get_nlarge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test overflows that can be resolved by clamping extra. 
*/ + assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, largemax, 1, 0), largemax, + "Unexpected xallocx() behavior"); + + /* Test overflow such that largemax-size underflows. */ + assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_small) { + size_t small0, small1, largemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + small1 = get_small_size(1); + largemax = get_large_size(get_nlarge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test size+extra overflow. */ + assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_large) { + int flags = MALLOCX_ARENA(arena_ind()); + size_t smallmax, large1, large2, large3, largemax; + void *p; + + /* Get size classes. */ + smallmax = get_small_size(get_nsmall()-1); + large1 = get_large_size(1); + large2 = get_large_size(2); + large3 = get_large_size(3); + largemax = get_large_size(get_nlarge()-1); + + p = mallocx(large3, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, large3, 0, flags), large3, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_ge(xallocx(p, large1, 0, flags), large1, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, + "Unexpected xallocx() behavior"); + + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } + /* Test size decrease with non-zero extra. */ + assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, large1, 0, flags), large1, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_le(xallocx(p, large3, 0, flags), large3, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, large1, 0, flags), large1, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, large1, 0, flags), large1, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. 
*/ + assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, + "Unexpected xallocx() behavior"); + + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } + /* Test size+extra overflow. */ + assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, + "Unexpected xallocx() behavior"); + + dallocx(p, flags); +} +TEST_END + +static void +print_filled_extents(const void *p, uint8_t c, size_t len) { + const uint8_t *pc = (const uint8_t *)p; + size_t i, range0; + uint8_t c0; + + malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); + range0 = 0; + c0 = pc[0]; + for (i = 0; i < len; i++) { + if (pc[i] != c0) { + malloc_printf(" %#x[%zu..%zu)", c0, range0, i); + range0 = i; + c0 = pc[i]; + } + } + malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); +} + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { + const uint8_t *pc = (const uint8_t *)p; + bool err; + size_t i; + + for (i = offset, err = false; i < offset+len; i++) { + if (pc[i] != c) { + err = true; + } + } + + if (err) { + print_filled_extents(p, c, offset + len); + } + + return err; +} + +static void +test_zero(size_t szmin, size_t szmax) { + int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; + size_t sz, nsz; + void *p; +#define FILL_BYTE 0x7aU + + sz = szmax; + p = mallocx(sz, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", + sz); + + /* + * Fill with non-zero so that non-debug builds are more likely to detect + * errors. + */ + memset(p, FILL_BYTE, sz); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + /* Shrink in place so that we can expect growing in place to succeed. */ + sz = szmin; + if (xallocx(p, sz, 0, flags) != sz) { + p = rallocx(p, sz, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + for (sz = szmin; sz < szmax; sz = nsz) { + nsz = nallocx(sz+1, flags); + if (xallocx(p, sz+1, 0, flags) != nsz) { + p = rallocx(p, sz+1, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + assert_false(validate_fill(p, 0x00, sz, nsz-sz), + "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); + memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); + assert_false(validate_fill(p, FILL_BYTE, 0, nsz), + "Memory not filled: nsz=%zu", nsz); + } + + dallocx(p, flags); +} + +TEST_BEGIN(test_zero_large) { + size_t large0, large1; + + /* Get size classes. 
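The xallocx() tests above depend on its in-place-only semantics: the call never moves the allocation and simply returns the resulting usable size, so callers compare that return value against the request and fall back to rallocx() when in-place growth was refused, exactly as test_extra_large does. A small sketch of that fallback idiom; the sizes are arbitrary and the header and symbol names are assumed as before:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    void *p = mallocx(4096, 0);
    if (p == NULL) {
        return 1;
    }

    /*
     * xallocx() attempts to grow to 8192 bytes in place and returns the
     * usable size afterwards, whether or not the request was satisfied.
     */
    size_t usable = xallocx(p, 8192, 0, 0);
    if (usable < 8192) {
        /* In-place growth failed; fall back to a moving reallocation. */
        void *q = rallocx(p, 8192, 0);
        if (q == NULL) {
            dallocx(p, 0);
            return 1;
        }
        p = q;
    }
    printf("usable size now %zu\n", sallocx(p, 0));
    dallocx(p, 0);
    return 0;
}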
*/ + large0 = get_large_size(0); + large1 = get_large_size(1); + + test_zero(large1, large0 * 2); +} +TEST_END + +int +main(void) { + return test( + test_same_size, + test_extra_no_move, + test_no_move_fail, + test_size, + test_size_extra_overflow, + test_extra_small, + test_extra_large, + test_zero_large); +} diff --git a/deps/jemalloc/test/integration/xallocx.sh b/deps/jemalloc/test/integration/xallocx.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/deps/jemalloc/test/integration/xallocx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/deps/jemalloc/test/src/SFMT.c b/deps/jemalloc/test/src/SFMT.c new file mode 100644 index 0000000..c05e218 --- /dev/null +++ b/deps/jemalloc/test/src/SFMT.c @@ -0,0 +1,719 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.c + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software, see LICENSE.txt + */ +#define SFMT_C_ +#include "test/jemalloc_test.h" +#include "test/SFMT-params.h" + +#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(ONLY64) && !defined(BIG_ENDIAN64) + #if defined(__GNUC__) + #error "-DONLY64 must be specified with -DBIG_ENDIAN64" + #endif +#undef ONLY64 +#endif +/*------------------------------------------------------ + 128-bit SIMD data type for Altivec, SSE2 or standard C + ------------------------------------------------------*/ +#if defined(HAVE_ALTIVEC) +/** 128-bit data structure */ +union W128_T { + vector unsigned int s; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#elif defined(HAVE_SSE2) +/** 128-bit data structure */ +union W128_T { + __m128i si; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#else + +/** 128-bit data structure */ +struct W128_T { + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef struct W128_T w128_t; + +#endif + +struct sfmt_s { + /** the 128-bit internal state array */ + w128_t sfmt[N]; + /** index counter to the 32-bit internal state array */ + int idx; + /** a flag: it is 0 if and only if the internal state is not yet + * initialized. */ + int initialized; +}; + +/*-------------------------------------- + FILE GLOBAL VARIABLES + internal state, index counter and flag + --------------------------------------*/ + +/** a parity check vector which certificate the period of 2^{MEXP} */ +static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; + +/*---------------- + STATIC FUNCTIONS + ----------------*/ +static inline int idxof(int i); +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +static inline void rshift128(w128_t *out, w128_t const *in, int shift); +static inline void lshift128(w128_t *out, w128_t const *in, int shift); +#endif +static inline void gen_rand_all(sfmt_t *ctx); +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +static inline uint32_t func1(uint32_t x); +static inline uint32_t func2(uint32_t x); +static void period_certification(sfmt_t *ctx); +#if defined(BIG_ENDIAN64) && !defined(ONLY64) +static inline void swap(w128_t *array, int size); +#endif + +#if defined(HAVE_ALTIVEC) + #include "test/SFMT-alti.h" +#elif defined(HAVE_SSE2) + #include "test/SFMT-sse2.h" +#endif + +/** + * This function simulate a 64-bit index of LITTLE ENDIAN + * in BIG ENDIAN machine. + */ +#ifdef ONLY64 +static inline int idxof(int i) { + return i ^ 1; +} +#else +static inline int idxof(int i) { + return i; +} +#endif +/** + * This function simulates SIMD 128-bit right shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. 
+ * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +/** + * This function simulates SIMD 128-bit left shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. + * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#ifdef ONLY64 +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +#endif + +/** + * This function represents the recursion formula. 
+ * @param r output + * @param a a 128-bit part of the internal state array + * @param b a 128-bit part of the internal state array + * @param c a 128-bit part of the internal state array + * @param d a 128-bit part of the internal state array + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#else +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#endif +#endif + +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +static inline void gen_rand_all(sfmt_t *ctx) { + int i; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } + for (; i < N; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pseudorandom numbers to be generated. 
+ */ +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < N; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < size - N; i++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j] = array[j + size - N]; + } + for (; i < size; i++, j++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + ctx->sfmt[j] = array[i]; + } +} +#endif + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) +static inline void swap(w128_t *array, int size) { + int i; + uint32_t x, y; + + for (i = 0; i < size; i++) { + x = array[i].u[0]; + y = array[i].u[2]; + array[i].u[0] = array[i].u[1]; + array[i].u[2] = array[i].u[3]; + array[i].u[1] = x; + array[i].u[3] = y; + } +} +#endif +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func1(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1664525UL; +} + +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func2(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1566083941UL; +} + +/** + * This function certificate the period of 2^{MEXP} + */ +static void period_certification(sfmt_t *ctx) { + int inner = 0; + int i, j; + uint32_t work; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + for (i = 0; i < 4; i++) + inner ^= psfmt32[idxof(i)] & parity[i]; + for (i = 16; i > 0; i >>= 1) + inner ^= inner >> i; + inner &= 1; + /* check OK */ + if (inner == 1) { + return; + } + /* check NG, and modification */ + for (i = 0; i < 4; i++) { + work = 1; + for (j = 0; j < 32; j++) { + if ((work & parity[i]) != 0) { + psfmt32[idxof(i)] ^= work; + return; + } + work = work << 1; + } + } +} + +/*---------------- + PUBLIC FUNCTIONS + ----------------*/ +/** + * This function returns the identification string. + * The string shows the word size, the Mersenne exponent, + * and all parameters of this generator. + */ +const char *get_idstring(void) { + return IDSTR; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array32() function. + * @return minimum size of array used for fill_array32() function. + */ +int get_min_array_size32(void) { + return N32; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array64() function. + * @return minimum size of array used for fill_array64() function. + */ +int get_min_array_size64(void) { + return N64; +} + +#ifndef ONLY64 +/** + * This function generates and returns 32-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * @return 32-bit pseudorandom number + */ +uint32_t gen_rand32(sfmt_t *ctx) { + uint32_t r; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + assert(ctx->initialized); + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } + r = psfmt32[ctx->idx++]; + return r; +} + +/* Generate a random integer in [0..limit). 
*/ +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { + uint32_t ret, above; + + above = 0xffffffffU - (0xffffffffU % limit); + while (1) { + ret = gen_rand32(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} +#endif +/** + * This function generates and returns 64-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * The function gen_rand64 should not be called after gen_rand32, + * unless an initialization is again executed. + * @return 64-bit pseudorandom number + */ +uint64_t gen_rand64(sfmt_t *ctx) { +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + uint32_t r1, r2; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +#else + uint64_t r; + uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; +#endif + + assert(ctx->initialized); + assert(ctx->idx % 2 == 0); + + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + r1 = psfmt32[ctx->idx]; + r2 = psfmt32[ctx->idx + 1]; + ctx->idx += 2; + return ((uint64_t)r2 << 32) | r1; +#else + r = psfmt64[ctx->idx / 2]; + ctx->idx += 2; + return r; +#endif +} + +/* Generate a random integer in [0..limit). */ +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { + uint64_t ret, above; + + above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); + while (1) { + ret = gen_rand64(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} + +#ifndef ONLY64 +/** + * This function generates pseudorandom 32-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 624 and a + * multiple of four. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. + * + * @param array an array where pseudorandom 32-bit integers are filled + * by this function. The pointer to the array must be \b "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 32-bit pseudorandom integers to be + * generated. size must be a multiple of 4, and greater than or equal + * to (MEXP / 128 + 1) * 4. + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 4 == 0); + assert(size >= N32); + + gen_rand_array(ctx, (w128_t *)array, size / 4); + ctx->idx = N32; +} +#endif + +/** + * This function generates pseudorandom 64-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 312 and a + * multiple of two. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. 
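/*
 * Editorial sketch (not part of the patch): minimal use of the generator API
 * defined in this file. It assumes a build without ONLY64, that the public
 * functions are declared in the SFMT test header pulled in via
 * jemalloc_test.h, and that the buffer handed to fill_array32() is 16-byte
 * aligned as the SIMD variants require; the function name is made up.
 */
static void
sfmt_usage_sketch(void) {
	sfmt_t *ctx;
	void *p;
	int min32;
	uint32_t raw, small;

	ctx = init_gen_rand(4321);	/* seed a fresh, 16-byte aligned state */
	assert(ctx != NULL);
	min32 = get_min_array_size32();

	/* Bulk generation is only legal directly after (re)initialization. */
	if (posix_memalign(&p, 16, (size_t)min32 * sizeof(uint32_t)) == 0) {
		fill_array32(ctx, (uint32_t *)p, min32);
		free(p);
	}

	/*
	 * Single draws; gen_rand32_range() uses rejection sampling so the
	 * final modulo reduction is unbiased.
	 */
	raw = gen_rand32(ctx);
	small = gen_rand32_range(ctx, 6);	/* value in [0..6) */
	assert(small < 6);
	(void)raw;

	fini_gen_rand(ctx);
}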
+ * + * @param array an array where pseudorandom 64-bit integers are filled + * by this function. The pointer to the array must be "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 64-bit pseudorandom integers to be + * generated. size must be a multiple of 2, and greater than or equal + * to (MEXP / 128 + 1) * 2 + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 2 == 0); + assert(size >= N64); + + gen_rand_array(ctx, (w128_t *)array, size / 2); + ctx->idx = N32; + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + swap((w128_t *)array, size /2); +#endif +} + +/** + * This function initializes the internal state array with a 32-bit + * integer seed. + * + * @param seed a 32-bit integer used as the seed. + */ +sfmt_t *init_gen_rand(uint32_t seed) { + void *p; + sfmt_t *ctx; + int i; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + psfmt32[idxof(0)] = seed; + for (i = 1; i < N32; i++) { + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + ^ (psfmt32[idxof(i - 1)] >> 30)) + + i; + } + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +/** + * This function initializes the internal state array, + * with an array of 32-bit integers used as the seeds + * @param init_key the array of 32-bit integers, used as a seed. + * @param key_length the length of init_key. 
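/*
 * Editorial sketch (not part of the patch): seeding from an array of 32-bit
 * integers rather than a single seed, using the init_by_array() function
 * defined just below. The key values and the function name are made up for
 * illustration; init_by_array() itself is assumed to be declared in the SFMT
 * test header included via jemalloc_test.h.
 */
static void
sfmt_array_seed_sketch(void) {
	uint32_t key[] = {0x123, 0x234, 0x345, 0x456};
	sfmt_t *ctx;
	uint32_t first;

	ctx = init_by_array(key, 4);	/* 4 == number of key words */
	assert(ctx != NULL);

	first = gen_rand32(ctx);
	(void)first;

	fini_gen_rand(ctx);
}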
+ */ +sfmt_t *init_by_array(uint32_t *init_key, int key_length) { + void *p; + sfmt_t *ctx; + int i, j, count; + uint32_t r; + int lag; + int mid; + int size = N * 4; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + if (size >= 623) { + lag = 11; + } else if (size >= 68) { + lag = 7; + } else if (size >= 39) { + lag = 5; + } else { + lag = 3; + } + mid = (size - lag) / 2; + + memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); + if (key_length + 1 > N32) { + count = key_length + 1; + } else { + count = N32; + } + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + ^ psfmt32[idxof(N32 - 1)]); + psfmt32[idxof(mid)] += r; + r += key_length; + psfmt32[idxof(mid + lag)] += r; + psfmt32[idxof(0)] = r; + + count--; + for (i = 1, j = 0; (j < count) && (j < key_length); j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += init_key[j] + i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (; j < count; j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (j = 0; j < N32; j++) { + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + + psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] ^= r; + r -= i; + psfmt32[idxof((i + mid + lag) % N32)] ^= r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +void fini_gen_rand(sfmt_t *ctx) { + assert(ctx != NULL); + + ctx->initialized = 0; + free(ctx); +} diff --git a/deps/jemalloc/test/src/btalloc.c b/deps/jemalloc/test/src/btalloc.c new file mode 100644 index 0000000..d570952 --- /dev/null +++ b/deps/jemalloc/test/src/btalloc.c @@ -0,0 +1,6 @@ +#include "test/jemalloc_test.h" + +void * +btalloc(size_t size, unsigned bits) { + return btalloc_0(size, bits); +} diff --git a/deps/jemalloc/test/src/btalloc_0.c b/deps/jemalloc/test/src/btalloc_0.c new file mode 100644 index 0000000..77d8904 --- /dev/null +++ b/deps/jemalloc/test/src/btalloc_0.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(0) diff --git a/deps/jemalloc/test/src/btalloc_1.c b/deps/jemalloc/test/src/btalloc_1.c new file mode 100644 index 0000000..4c126c3 --- /dev/null +++ b/deps/jemalloc/test/src/btalloc_1.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(1) diff --git a/deps/jemalloc/test/src/math.c b/deps/jemalloc/test/src/math.c new file mode 100644 index 0000000..1758c67 --- /dev/null +++ b/deps/jemalloc/test/src/math.c @@ -0,0 +1,2 @@ +#define MATH_C_ +#include "test/jemalloc_test.h" diff --git a/deps/jemalloc/test/src/mq.c b/deps/jemalloc/test/src/mq.c new file mode 100644 index 0000000..9b5f672 --- /dev/null +++ b/deps/jemalloc/test/src/mq.c @@ -0,0 +1,27 @@ +#include "test/jemalloc_test.h" + +/* + * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep + * time is guaranteed. 
+ */ +void +mq_nanosleep(unsigned ns) { + assert(ns <= 1000*1000*1000); + +#ifdef _WIN32 + Sleep(ns / 1000); +#else + { + struct timespec timeout; + + if (ns < 1000*1000*1000) { + timeout.tv_sec = 0; + timeout.tv_nsec = ns; + } else { + timeout.tv_sec = 1; + timeout.tv_nsec = 0; + } + nanosleep(&timeout, NULL); + } +#endif +} diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c new file mode 100644 index 0000000..d9ce375 --- /dev/null +++ b/deps/jemalloc/test/src/mtx.c @@ -0,0 +1,61 @@ +#include "test/jemalloc_test.h" + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +bool +mtx_init(mtx_t *mtx) { +#ifdef _WIN32 + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, + _CRT_SPINCOUNT)) { + return true; + } +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mtx->lock = OS_UNFAIR_LOCK_INIT; +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); + if (pthread_mutex_init(&mtx->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return true; + } + pthread_mutexattr_destroy(&attr); +#endif + return false; +} + +void +mtx_fini(mtx_t *mtx) { +#ifdef _WIN32 +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +#else + pthread_mutex_destroy(&mtx->lock); +#endif +} + +void +mtx_lock(mtx_t *mtx) { +#ifdef _WIN32 + EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mtx->lock); +#else + pthread_mutex_lock(&mtx->lock); +#endif +} + +void +mtx_unlock(mtx_t *mtx) { +#ifdef _WIN32 + LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mtx->lock); +#else + pthread_mutex_unlock(&mtx->lock); +#endif +} diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c new file mode 100644 index 0000000..f97ce4d --- /dev/null +++ b/deps/jemalloc/test/src/test.c @@ -0,0 +1,234 @@ +#include "test/jemalloc_test.h" + +/* Test status state. */ + +static unsigned test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char * test_name = ""; + +/* Reentrancy testing helpers. */ + +#define NUM_REENTRANT_ALLOCS 20 +typedef enum { + non_reentrant = 0, + libc_reentrant = 1, + arena_new_reentrant = 2 +} reentrancy_t; +static reentrancy_t reentrancy; + +static bool libc_hook_ran = false; +static bool arena_new_hook_ran = false; + +static const char * +reentrancy_t_str(reentrancy_t r) { + switch (r) { + case non_reentrant: + return "non-reentrant"; + case libc_reentrant: + return "libc-reentrant"; + case arena_new_reentrant: + return "arena_new-reentrant"; + default: + unreachable(); + } +} + +static void +do_hook(bool *hook_ran, void (**hook)()) { + *hook_ran = true; + *hook = NULL; + + size_t alloc_size = 1; + for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) { + free(malloc(alloc_size)); + alloc_size *= 2; + } +} + +static void +libc_reentrancy_hook() { + do_hook(&libc_hook_ran, &test_hooks_libc_hook); +} + +static void +arena_new_reentrancy_hook() { + do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook); +} + +/* Actual test infrastructure. */ +bool +test_is_reentrant() { + return reentrancy != non_reentrant; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_skip(const char *format, ...) 
{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_skip; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_fail(const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} + +static const char * +test_status_string(test_status_t test_status) { + switch (test_status) { + case test_status_pass: return "pass"; + case test_status_skip: return "skip"; + case test_status_fail: return "fail"; + default: not_reached(); + } +} + +void +p_test_init(const char *name) { + test_count++; + test_status = test_status_pass; + test_name = name; +} + +void +p_test_fini(void) { + test_counts[test_status]++; + malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy), + test_status_string(test_status)); +} + +static void +check_global_slow(test_status_t *status) { +#ifdef JEMALLOC_UNIT_TEST + /* + * This check needs to peek into tsd internals, which is why it's only + * exposed in unit tests. + */ + if (tsd_global_slow()) { + malloc_printf("Testing increased global slow count\n"); + *status = test_status_fail; + } +#endif +} + +static test_status_t +p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { + test_status_t ret; + + if (do_malloc_init) { + /* + * Make sure initialization occurs prior to running tests. + * Tests are special because they may use internal facilities + * prior to triggering initialization as a side effect of + * calling into the public API. + */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return test_status_fail; + } + } + + ret = test_status_pass; + for (; t != NULL; t = va_arg(ap, test_t *)) { + /* Non-reentrant run. */ + reentrancy = non_reentrant; + test_hooks_arena_new_hook = test_hooks_libc_hook = NULL; + t(); + if (test_status > ret) { + ret = test_status; + } + check_global_slow(&ret); + /* Reentrant run. */ + if (do_reentrant) { + reentrancy = libc_reentrant; + test_hooks_arena_new_hook = NULL; + test_hooks_libc_hook = &libc_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + check_global_slow(&ret); + + reentrancy = arena_new_reentrant; + test_hooks_libc_hook = NULL; + test_hooks_arena_new_hook = &arena_new_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + check_global_slow(&ret); + } + } + + malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", + test_status_string(test_status_pass), + test_counts[test_status_pass], test_count, + test_status_string(test_status_skip), + test_counts[test_status_skip], test_count, + test_status_string(test_status_fail), + test_counts[test_status_fail], test_count); + + return ret; +} + +test_status_t +p_test(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, true, t, ap); + va_end(ap); + + return ret; +} + +test_status_t +p_test_no_reentrancy(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, false, t, ap); + va_end(ap); + + return ret; +} + +test_status_t +p_test_no_malloc_init(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + /* + * We also omit reentrancy from bootstrapping tests, since we don't + * (yet) care about general reentrancy during bootstrapping. 
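/*
 * Editorial sketch (not part of the patch): the usual shape of a unit test
 * driving this harness. TEST_BEGIN/TEST_END are used the same way by
 * test/stress/microbench.c further below; the test() wrapper is assumed to
 * forward to p_test() just as test_no_reentrancy() forwards to
 * p_test_no_reentrancy().
 */
TEST_BEGIN(test_sketch) {
	void *p = malloc(1);

	if (p == NULL) {
		test_fail("Unexpected malloc() failure");
	}
	free(p);
}
TEST_END

int
main(void) {
	return test(
	    test_sketch);
}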
+ */ + ret = p_test_impl(false, false, t, ap); + va_end(ap); + + return ret; +} + +void +p_test_fail(const char *prefix, const char *message) { + malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); + test_status = test_status_fail; +} diff --git a/deps/jemalloc/test/src/thd.c b/deps/jemalloc/test/src/thd.c new file mode 100644 index 0000000..9a15eab --- /dev/null +++ b/deps/jemalloc/test/src/thd.c @@ -0,0 +1,34 @@ +#include "test/jemalloc_test.h" + +#ifdef _WIN32 +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { + LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; + *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); + if (*thd == NULL) { + test_fail("Error in CreateThread()\n"); + } +} + +void +thd_join(thd_t thd, void **ret) { + if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { + DWORD exit_code; + GetExitCodeThread(thd, (LPDWORD) &exit_code); + *ret = (void *)(uintptr_t)exit_code; + } +} + +#else +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { + if (pthread_create(thd, NULL, proc, arg) != 0) { + test_fail("Error in pthread_create()\n"); + } +} + +void +thd_join(thd_t thd, void **ret) { + pthread_join(thd, ret); +} +#endif diff --git a/deps/jemalloc/test/src/timer.c b/deps/jemalloc/test/src/timer.c new file mode 100644 index 0000000..c451c63 --- /dev/null +++ b/deps/jemalloc/test/src/timer.c @@ -0,0 +1,56 @@ +#include "test/jemalloc_test.h" + +void +timer_start(timedelta_t *timer) { + nstime_init(&timer->t0, 0); + nstime_update(&timer->t0); +} + +void +timer_stop(timedelta_t *timer) { + nstime_copy(&timer->t1, &timer->t0); + nstime_update(&timer->t1); +} + +uint64_t +timer_usec(const timedelta_t *timer) { + nstime_t delta; + + nstime_copy(&delta, &timer->t1); + nstime_subtract(&delta, &timer->t0); + return nstime_ns(&delta) / 1000; +} + +void +timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { + uint64_t t0 = timer_usec(a); + uint64_t t1 = timer_usec(b); + uint64_t mult; + size_t i = 0; + size_t j, n; + + /* Whole. */ + n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); + i += n; + if (i >= buflen) { + return; + } + mult = 1; + for (j = 0; j < n; j++) { + mult *= 10; + } + + /* Decimal. */ + n = malloc_snprintf(&buf[i], buflen-i, "."); + i += n; + + /* Fraction. */ + while (i < buflen-1) { + uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 + >= 5)) ? 
1 : 0; + n = malloc_snprintf(&buf[i], buflen-i, + "%"FMTu64, (t0 * mult / t1) % 10 + round); + i += n; + mult *= 10; + } +} diff --git a/deps/jemalloc/test/stress/hookbench.c b/deps/jemalloc/test/stress/hookbench.c new file mode 100644 index 0000000..97e90b0 --- /dev/null +++ b/deps/jemalloc/test/stress/hookbench.c @@ -0,0 +1,73 @@ +#include "test/jemalloc_test.h" + +static void +noop_alloc_hook(void *extra, hook_alloc_t type, void *result, + uintptr_t result_raw, uintptr_t args_raw[3]) { +} + +static void +noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address, + uintptr_t args_raw[3]) { +} + +static void +noop_expand_hook(void *extra, hook_expand_t type, void *address, + size_t old_usize, size_t new_usize, uintptr_t result_raw, + uintptr_t args_raw[4]) { +} + +static void +malloc_free_loop(int iters) { + for (int i = 0; i < iters; i++) { + void *p = mallocx(1, 0); + free(p); + } +} + +static void +test_hooked(int iters) { + hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook, + NULL}; + + int err; + void *handles[HOOK_MAX]; + size_t sz = sizeof(handles[0]); + + for (int i = 0; i < HOOK_MAX; i++) { + err = mallctl("experimental.hooks.install", &handles[i], + &sz, &hooks, sizeof(hooks)); + assert(err == 0); + + timedelta_t timer; + timer_start(&timer); + malloc_free_loop(iters); + timer_stop(&timer); + malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1, + i + 1 == 1 ? "" : "s", timer_usec(&timer)); + } + for (int i = 0; i < HOOK_MAX; i++) { + err = mallctl("experimental.hooks.remove", NULL, NULL, + &handles[i], sizeof(handles[i])); + assert(err == 0); + } +} + +static void +test_unhooked(int iters) { + timedelta_t timer; + timer_start(&timer); + malloc_free_loop(iters); + timer_stop(&timer); + + malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer)); +} + +int +main(void) { + /* Initialize */ + free(mallocx(1, 0)); + int iters = 10 * 1000 * 1000; + malloc_printf("Benchmarking hooks with %d iterations:\n", iters); + test_hooked(iters); + test_unhooked(iters); +} diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c new file mode 100644 index 0000000..988b793 --- /dev/null +++ b/deps/jemalloc/test/stress/microbench.c @@ -0,0 +1,165 @@ +#include "test/jemalloc_test.h" + +static inline void +time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, + void (*func)(void)) { + uint64_t i; + + for (i = 0; i < nwarmup; i++) { + func(); + } + timer_start(timer); + for (i = 0; i < niter; i++) { + func(); + } + timer_stop(timer); +} + +void +compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, + void (*func_a), const char *name_b, void (*func_b)) { + timedelta_t timer_a, timer_b; + char ratio_buf[6]; + void *p; + + p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + + time_func(&timer_a, nwarmup, niter, func_a); + time_func(&timer_b, nwarmup, niter, func_b); + + timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); + malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " + "%s=%"FMTu64"us, ratio=1:%s\n", + niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), + ratio_buf); + + dallocx(p, 0); +} + +static void +malloc_free(void) { + /* The compiler can optimize away free(malloc(1))! 
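/*
 * Editorial sketch (not part of the patch): combining the mtx, thd and timer
 * helpers from test/src/ to time one locked update made by a worker thread.
 * Identifiers prefixed with sketch_ are made up for illustration; the
 * mtx_t/thd_t/timedelta_t types come from the test headers.
 */
static mtx_t sketch_lock;
static unsigned sketch_count;

static void *
sketch_worker(void *arg) {
	(void)arg;
	mtx_lock(&sketch_lock);
	sketch_count++;
	mtx_unlock(&sketch_lock);
	return NULL;
}

static void
sketch_timed_thread(void) {
	timedelta_t timer;
	thd_t thd;

	if (mtx_init(&sketch_lock)) {
		test_fail("Unexpected mtx_init() failure");
		return;
	}
	timer_start(&timer);
	thd_create(&thd, sketch_worker, NULL);
	thd_join(thd, NULL);
	timer_stop(&timer);
	mtx_fini(&sketch_lock);
	malloc_printf("thread + lock round trip: %"FMTu64" us\n",
	    timer_usec(&timer));
}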
*/ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + free(p); +} + +static void +mallocx_free(void) { + void *p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + free(p); +} + +TEST_BEGIN(test_malloc_vs_mallocx) { + compare_funcs(10*1000*1000, 100*1000*1000, "malloc", + malloc_free, "mallocx", mallocx_free); +} +TEST_END + +static void +malloc_dallocx(void) { + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + dallocx(p, 0); +} + +static void +malloc_sdallocx(void) { + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + sdallocx(p, 1, 0); +} + +TEST_BEGIN(test_free_vs_dallocx) { + compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, + "dallocx", malloc_dallocx); +} +TEST_END + +TEST_BEGIN(test_dallocx_vs_sdallocx) { + compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, + "sdallocx", malloc_sdallocx); +} +TEST_END + +static void +malloc_mus_free(void) { + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + malloc_usable_size(p); + free(p); +} + +static void +malloc_sallocx_free(void) { + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + if (sallocx(p, 0) < 1) { + test_fail("Unexpected sallocx() failure"); + } + free(p); +} + +TEST_BEGIN(test_mus_vs_sallocx) { + compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", + malloc_mus_free, "sallocx", malloc_sallocx_free); +} +TEST_END + +static void +malloc_nallocx_free(void) { + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + if (nallocx(1, 0) < 1) { + test_fail("Unexpected nallocx() failure"); + } + free(p); +} + +TEST_BEGIN(test_sallocx_vs_nallocx) { + compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", + malloc_sallocx_free, "nallocx", malloc_nallocx_free); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_malloc_vs_mallocx, + test_free_vs_dallocx, + test_dallocx_vs_sdallocx, + test_mus_vs_sallocx, + test_sallocx_vs_nallocx); +} diff --git a/deps/jemalloc/test/test.sh.in b/deps/jemalloc/test/test.sh.in new file mode 100644 index 0000000..39302ff --- /dev/null +++ b/deps/jemalloc/test/test.sh.in @@ -0,0 +1,80 @@ +#!/bin/sh + +case @abi@ in + macho) + export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" + ;; + pecoff) + export PATH="${PATH}:@objroot@lib" + ;; + *) + ;; +esac + +# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so +# it can be repeatedly concatenated with per test settings. +export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF} +# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. +export_malloc_conf() { + if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then + export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" + else + export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" + fi +} + +# Corresponds to test_status_t. +pass_code=0 +skip_code=1 +fail_code=2 + +pass_count=0 +skip_count=0 +fail_count=0 +for t in $@; do + if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then + echo + fi + echo "=== ${t} ===" + if [ -e "@srcroot@${t}.sh" ] ; then + # Source the shell script corresponding to the test in a subshell and + # execute the test. 
This allows the shell script to set MALLOC_CONF, which + # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the + # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). + enable_fill=@enable_fill@ \ + enable_prof=@enable_prof@ \ + . @srcroot@${t}.sh && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ + else + export MALLOC_CONF= && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ + fi + result_code=$? + case ${result_code} in + ${pass_code}) + pass_count=$((pass_count+1)) + ;; + ${skip_code}) + skip_count=$((skip_count+1)) + ;; + ${fail_code}) + fail_count=$((fail_count+1)) + ;; + *) + echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 + echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 + exit 1 + esac +done + +total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` +echo +echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" + +if [ ${fail_count} -eq 0 ] ; then + exit 0 +else + exit 1 +fi diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c new file mode 100644 index 0000000..1fc8cf1 --- /dev/null +++ b/deps/jemalloc/test/unit/SFMT.c @@ -0,0 +1,1599 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "test/jemalloc_test.h" + +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 + +static const uint32_t init_gen_rand_32_expected[] = { + 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, + 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, + 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, + 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, + 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, + 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, + 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, + 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, + 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, + 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, + 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, + 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, + 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, + 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, + 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, + 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, + 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, + 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, + 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, + 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, + 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, + 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, + 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, + 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, + 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, + 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, + 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, + 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, + 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, + 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, + 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, + 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, + 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, + 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, + 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, + 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, + 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, + 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, + 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, + 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, + 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, + 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, + 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U, + 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, + 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, + 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, + 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, + 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, + 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, + 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, + 3141868989U, 3293216228U, 1194331837U, 
1254570642U, 3049934521U, + 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, + 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, + 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, + 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, + 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, + 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, + 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, + 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, + 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, + 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, + 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, + 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, + 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, + 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, + 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, + 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, + 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, + 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, + 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, + 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, + 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, + 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, + 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, + 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, + 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, + 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, + 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, + 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, + 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, + 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, + 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, + 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, + 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, + 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, + 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, + 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, + 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, + 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, + 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, + 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, + 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, + 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, + 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, + 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, + 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, + 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U, + 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, + 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, + 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, + 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, + 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, + 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, + 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, + 3994702151U, 22493119U, 
3584400918U, 3446253670U, 4254789085U, + 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, + 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, + 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, + 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, + 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, + 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, + 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, + 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, + 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, + 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, + 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, + 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, + 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, + 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, + 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, + 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, + 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, + 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, + 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, + 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, + 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, + 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, + 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, + 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, + 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, + 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, + 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, + 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, + 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, + 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, + 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, + 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, + 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, + 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, + 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, + 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, + 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, + 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, + 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, + 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, + 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, + 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, + 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, + 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, + 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, + 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U, + 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, + 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, + 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, + 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, + 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, + 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, + 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, + 2489884874U, 756853961U, 
2298874501U, 4013448667U, 4143996022U, + 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, + 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, + 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, + 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, + 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, + 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, + 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, + 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, + 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, + 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, + 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, + 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, + 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, + 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, + 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, + 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, + 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, + 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, + 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, + 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, + 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, + 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, + 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, + 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, + 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, + 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, + 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, + 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, + 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, + 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, + 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, + 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, + 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, + 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, + 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, + 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, + 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, + 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, + 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, + 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, + 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U +}; +static const uint32_t init_by_array_32_expected[] = { + 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, + 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, + 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, + 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U, + 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, + 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, + 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, + 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, + 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, + 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, + 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, + 3438299U, 2150366101U, 
1577256676U, 3802546413U, 1787774626U, + 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, + 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, + 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, + 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, + 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, + 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, + 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, + 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, + 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, + 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, + 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, + 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, + 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, + 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, + 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, + 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, + 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, + 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, + 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, + 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, + 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, + 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, + 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, + 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, + 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, + 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, + 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, + 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, + 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, + 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, + 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, + 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, + 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, + 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, + 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, + 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, + 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, + 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, + 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, + 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, + 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, + 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, + 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, + 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, + 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, + 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U, + 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, + 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, + 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, + 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, + 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, + 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, + 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, + 2770783427U, 2775406005U, 
2777781742U, 1931292655U, 472147933U, + 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, + 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, + 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, + 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, + 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, + 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, + 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, + 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, + 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, + 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, + 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, + 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, + 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, + 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, + 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, + 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, + 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, + 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, + 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, + 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, + 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, + 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, + 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, + 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, + 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, + 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, + 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, + 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, + 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, + 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, + 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, + 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, + 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, + 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, + 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, + 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, + 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, + 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, + 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, + 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, + 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, + 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, + 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, + 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, + 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, + 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U, + 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, + 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, + 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, + 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, + 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, + 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, + 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, + 3512343882U, 1552467474U, 3316162670U, 
1275841024U, 4142173454U, + 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, + 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, + 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, + 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, + 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, + 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, + 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, + 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, + 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, + 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, + 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, + 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, + 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, + 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, + 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, + 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, + 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, + 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, + 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, + 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, + 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, + 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, + 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, + 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, + 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, + 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, + 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, + 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, + 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, + 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, + 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, + 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, + 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, + 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, + 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, + 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, + 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, + 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, + 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, + 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, + 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, + 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, + 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, + 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, + 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, + 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U, + 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, + 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, + 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, + 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, + 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, + 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, + 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, + 2423096824U, 21831741U, 380011703U, 
2498168716U, 861806087U, + 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, + 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, + 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, + 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, + 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, + 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, + 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, + 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, + 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, + 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, + 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, + 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, + 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, + 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, + 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, + 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, + 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, + 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, + 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, + 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, + 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, + 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, + 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, + 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, + 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, + 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U +}; +static const uint64_t init_gen_rand_64_expected[] = { + KQU(16924766246869039260), KQU( 8201438687333352714), + KQU( 2265290287015001750), KQU(18397264611805473832), + KQU( 3375255223302384358), KQU( 6345559975416828796), + KQU(18229739242790328073), KQU( 7596792742098800905), + KQU( 255338647169685981), KQU( 2052747240048610300), + KQU(18328151576097299343), KQU(12472905421133796567), + KQU(11315245349717600863), KQU(16594110197775871209), + KQU(15708751964632456450), KQU(10452031272054632535), + KQU(11097646720811454386), KQU( 4556090668445745441), + KQU(17116187693090663106), KQU(14931526836144510645), + KQU( 9190752218020552591), KQU( 9625800285771901401), + KQU(13995141077659972832), KQU( 5194209094927829625), + KQU( 4156788379151063303), KQU( 8523452593770139494), + KQU(14082382103049296727), KQU( 2462601863986088483), + KQU( 3030583461592840678), KQU( 5221622077872827681), + KQU( 3084210671228981236), KQU(13956758381389953823), + KQU(13503889856213423831), KQU(15696904024189836170), + KQU( 4612584152877036206), KQU( 6231135538447867881), + KQU(10172457294158869468), KQU( 6452258628466708150), + KQU(14044432824917330221), KQU( 370168364480044279), + KQU(10102144686427193359), KQU( 667870489994776076), + KQU( 2732271956925885858), KQU(18027788905977284151), + KQU(15009842788582923859), KQU( 7136357960180199542), + KQU(15901736243475578127), KQU(16951293785352615701), + KQU(10551492125243691632), KQU(17668869969146434804), + KQU(13646002971174390445), KQU( 9804471050759613248), + KQU( 5511670439655935493), KQU(18103342091070400926), + KQU(17224512747665137533), KQU(15534627482992618168), + KQU( 1423813266186582647), KQU(15821176807932930024), + KQU( 30323369733607156), KQU(11599382494723479403), + KQU( 653856076586810062), KQU( 3176437395144899659), + KQU(14028076268147963917), 
KQU(16156398271809666195), + KQU( 3166955484848201676), KQU( 5746805620136919390), + KQU(17297845208891256593), KQU(11691653183226428483), + KQU(17900026146506981577), KQU(15387382115755971042), + KQU(16923567681040845943), KQU( 8039057517199388606), + KQU(11748409241468629263), KQU( 794358245539076095), + KQU(13438501964693401242), KQU(14036803236515618962), + KQU( 5252311215205424721), KQU(17806589612915509081), + KQU( 6802767092397596006), KQU(14212120431184557140), + KQU( 1072951366761385712), KQU(13098491780722836296), + KQU( 9466676828710797353), KQU(12673056849042830081), + KQU(12763726623645357580), KQU(16468961652999309493), + KQU(15305979875636438926), KQU(17444713151223449734), + KQU( 5692214267627883674), KQU(13049589139196151505), + KQU( 880115207831670745), KQU( 1776529075789695498), + KQU(16695225897801466485), KQU(10666901778795346845), + KQU( 6164389346722833869), KQU( 2863817793264300475), + KQU( 9464049921886304754), KQU( 3993566636740015468), + KQU( 9983749692528514136), KQU(16375286075057755211), + KQU(16042643417005440820), KQU(11445419662923489877), + KQU( 7999038846885158836), KQU( 6721913661721511535), + KQU( 5363052654139357320), KQU( 1817788761173584205), + KQU(13290974386445856444), KQU( 4650350818937984680), + KQU( 8219183528102484836), KQU( 1569862923500819899), + KQU( 4189359732136641860), KQU(14202822961683148583), + KQU( 4457498315309429058), KQU(13089067387019074834), + KQU(11075517153328927293), KQU(10277016248336668389), + KQU( 7070509725324401122), KQU(17808892017780289380), + KQU(13143367339909287349), KQU( 1377743745360085151), + KQU( 5749341807421286485), KQU(14832814616770931325), + KQU( 7688820635324359492), KQU(10960474011539770045), + KQU( 81970066653179790), KQU(12619476072607878022), + KQU( 4419566616271201744), KQU(15147917311750568503), + KQU( 5549739182852706345), KQU( 7308198397975204770), + KQU(13580425496671289278), KQU(17070764785210130301), + KQU( 8202832846285604405), KQU( 6873046287640887249), + KQU( 6927424434308206114), KQU( 6139014645937224874), + KQU(10290373645978487639), KQU(15904261291701523804), + KQU( 9628743442057826883), KQU(18383429096255546714), + KQU( 4977413265753686967), KQU( 7714317492425012869), + KQU( 9025232586309926193), KQU(14627338359776709107), + KQU(14759849896467790763), KQU(10931129435864423252), + KQU( 4588456988775014359), KQU(10699388531797056724), + KQU( 468652268869238792), KQU( 5755943035328078086), + KQU( 2102437379988580216), KQU( 9986312786506674028), + KQU( 2654207180040945604), KQU( 8726634790559960062), + KQU( 100497234871808137), KQU( 2800137176951425819), + KQU( 6076627612918553487), KQU( 5780186919186152796), + KQU( 8179183595769929098), KQU( 6009426283716221169), + KQU( 2796662551397449358), KQU( 1756961367041986764), + KQU( 6972897917355606205), KQU(14524774345368968243), + KQU( 2773529684745706940), KQU( 4853632376213075959), + KQU( 4198177923731358102), KQU( 8271224913084139776), + KQU( 2741753121611092226), KQU(16782366145996731181), + KQU(15426125238972640790), KQU(13595497100671260342), + KQU( 3173531022836259898), KQU( 6573264560319511662), + KQU(18041111951511157441), KQU( 2351433581833135952), + KQU( 3113255578908173487), KQU( 1739371330877858784), + KQU(16046126562789165480), KQU( 8072101652214192925), + KQU(15267091584090664910), KQU( 9309579200403648940), + KQU( 5218892439752408722), KQU(14492477246004337115), + KQU(17431037586679770619), KQU( 7385248135963250480), + KQU( 9580144956565560660), KQU( 4919546228040008720), + KQU(15261542469145035584), KQU(18233297270822253102), + 
KQU( 5453248417992302857), KQU( 9309519155931460285), + KQU(10342813012345291756), KQU(15676085186784762381), + KQU(15912092950691300645), KQU( 9371053121499003195), + KQU( 9897186478226866746), KQU(14061858287188196327), + KQU( 122575971620788119), KQU(12146750969116317754), + KQU( 4438317272813245201), KQU( 8332576791009527119), + KQU(13907785691786542057), KQU(10374194887283287467), + KQU( 2098798755649059566), KQU( 3416235197748288894), + KQU( 8688269957320773484), KQU( 7503964602397371571), + KQU(16724977015147478236), KQU( 9461512855439858184), + KQU(13259049744534534727), KQU( 3583094952542899294), + KQU( 8764245731305528292), KQU(13240823595462088985), + KQU(13716141617617910448), KQU(18114969519935960955), + KQU( 2297553615798302206), KQU( 4585521442944663362), + KQU(17776858680630198686), KQU( 4685873229192163363), + KQU( 152558080671135627), KQU(15424900540842670088), + KQU(13229630297130024108), KQU(17530268788245718717), + KQU(16675633913065714144), KQU( 3158912717897568068), + KQU(15399132185380087288), KQU( 7401418744515677872), + KQU(13135412922344398535), KQU( 6385314346100509511), + KQU(13962867001134161139), KQU(10272780155442671999), + KQU(12894856086597769142), KQU(13340877795287554994), + KQU(12913630602094607396), KQU(12543167911119793857), + KQU(17343570372251873096), KQU(10959487764494150545), + KQU( 6966737953093821128), KQU(13780699135496988601), + KQU( 4405070719380142046), KQU(14923788365607284982), + KQU( 2869487678905148380), KQU( 6416272754197188403), + KQU(15017380475943612591), KQU( 1995636220918429487), + KQU( 3402016804620122716), KQU(15800188663407057080), + KQU(11362369990390932882), KQU(15262183501637986147), + KQU(10239175385387371494), KQU( 9352042420365748334), + KQU( 1682457034285119875), KQU( 1724710651376289644), + KQU( 2038157098893817966), KQU( 9897825558324608773), + KQU( 1477666236519164736), KQU(16835397314511233640), + KQU(10370866327005346508), KQU(10157504370660621982), + KQU(12113904045335882069), KQU(13326444439742783008), + KQU(11302769043000765804), KQU(13594979923955228484), + KQU(11779351762613475968), KQU( 3786101619539298383), + KQU( 8021122969180846063), KQU(15745904401162500495), + KQU(10762168465993897267), KQU(13552058957896319026), + KQU(11200228655252462013), KQU( 5035370357337441226), + KQU( 7593918984545500013), KQU( 5418554918361528700), + KQU( 4858270799405446371), KQU( 9974659566876282544), + KQU(18227595922273957859), KQU( 2772778443635656220), + KQU(14285143053182085385), KQU( 9939700992429600469), + KQU(12756185904545598068), KQU( 2020783375367345262), + KQU( 57026775058331227), KQU( 950827867930065454), + KQU( 6602279670145371217), KQU( 2291171535443566929), + KQU( 5832380724425010313), KQU( 1220343904715982285), + KQU(17045542598598037633), KQU(15460481779702820971), + KQU(13948388779949365130), KQU(13975040175430829518), + KQU(17477538238425541763), KQU(11104663041851745725), + KQU(15860992957141157587), KQU(14529434633012950138), + KQU( 2504838019075394203), KQU( 7512113882611121886), + KQU( 4859973559980886617), KQU( 1258601555703250219), + KQU(15594548157514316394), KQU( 4516730171963773048), + KQU(11380103193905031983), KQU( 6809282239982353344), + KQU(18045256930420065002), KQU( 2453702683108791859), + KQU( 977214582986981460), KQU( 2006410402232713466), + KQU( 6192236267216378358), KQU( 3429468402195675253), + KQU(18146933153017348921), KQU(17369978576367231139), + KQU( 1246940717230386603), KQU(11335758870083327110), + KQU(14166488801730353682), KQU( 9008573127269635732), + KQU(10776025389820643815), 
KQU(15087605441903942962), + KQU( 1359542462712147922), KQU(13898874411226454206), + KQU(17911176066536804411), KQU( 9435590428600085274), + KQU( 294488509967864007), KQU( 8890111397567922046), + KQU( 7987823476034328778), KQU(13263827582440967651), + KQU( 7503774813106751573), KQU(14974747296185646837), + KQU( 8504765037032103375), KQU(17340303357444536213), + KQU( 7704610912964485743), KQU( 8107533670327205061), + KQU( 9062969835083315985), KQU(16968963142126734184), + KQU(12958041214190810180), KQU( 2720170147759570200), + KQU( 2986358963942189566), KQU(14884226322219356580), + KQU( 286224325144368520), KQU(11313800433154279797), + KQU(18366849528439673248), KQU(17899725929482368789), + KQU( 3730004284609106799), KQU( 1654474302052767205), + KQU( 5006698007047077032), KQU( 8196893913601182838), + KQU(15214541774425211640), KQU(17391346045606626073), + KQU( 8369003584076969089), KQU( 3939046733368550293), + KQU(10178639720308707785), KQU( 2180248669304388697), + KQU( 62894391300126322), KQU( 9205708961736223191), + KQU( 6837431058165360438), KQU( 3150743890848308214), + KQU(17849330658111464583), KQU(12214815643135450865), + KQU(13410713840519603402), KQU( 3200778126692046802), + KQU(13354780043041779313), KQU( 800850022756886036), + KQU(15660052933953067433), KQU( 6572823544154375676), + KQU(11030281857015819266), KQU(12682241941471433835), + KQU(11654136407300274693), KQU( 4517795492388641109), + KQU( 9757017371504524244), KQU(17833043400781889277), + KQU(12685085201747792227), KQU(10408057728835019573), + KQU( 98370418513455221), KQU( 6732663555696848598), + KQU(13248530959948529780), KQU( 3530441401230622826), + KQU(18188251992895660615), KQU( 1847918354186383756), + KQU( 1127392190402660921), KQU(11293734643143819463), + KQU( 3015506344578682982), KQU(13852645444071153329), + KQU( 2121359659091349142), KQU( 1294604376116677694), + KQU( 5616576231286352318), KQU( 7112502442954235625), + KQU(11676228199551561689), KQU(12925182803007305359), + KQU( 7852375518160493082), KQU( 1136513130539296154), + KQU( 5636923900916593195), KQU( 3221077517612607747), + KQU(17784790465798152513), KQU( 3554210049056995938), + KQU(17476839685878225874), KQU( 3206836372585575732), + KQU( 2765333945644823430), KQU(10080070903718799528), + KQU( 5412370818878286353), KQU( 9689685887726257728), + KQU( 8236117509123533998), KQU( 1951139137165040214), + KQU( 4492205209227980349), KQU(16541291230861602967), + KQU( 1424371548301437940), KQU( 9117562079669206794), + KQU(14374681563251691625), KQU(13873164030199921303), + KQU( 6680317946770936731), KQU(15586334026918276214), + KQU(10896213950976109802), KQU( 9506261949596413689), + KQU( 9903949574308040616), KQU( 6038397344557204470), + KQU( 174601465422373648), KQU(15946141191338238030), + KQU(17142225620992044937), KQU( 7552030283784477064), + KQU( 2947372384532947997), KQU( 510797021688197711), + KQU( 4962499439249363461), KQU( 23770320158385357), + KQU( 959774499105138124), KQU( 1468396011518788276), + KQU( 2015698006852312308), KQU( 4149400718489980136), + KQU( 5992916099522371188), KQU(10819182935265531076), + KQU(16189787999192351131), KQU( 342833961790261950), + KQU(12470830319550495336), KQU(18128495041912812501), + KQU( 1193600899723524337), KQU( 9056793666590079770), + KQU( 2154021227041669041), KQU( 4963570213951235735), + KQU( 4865075960209211409), KQU( 2097724599039942963), + KQU( 2024080278583179845), KQU(11527054549196576736), + KQU(10650256084182390252), KQU( 4808408648695766755), + KQU( 1642839215013788844), KQU(10607187948250398390), + KQU( 
7076868166085913508), KQU( 730522571106887032), + KQU(12500579240208524895), KQU( 4484390097311355324), + KQU(15145801330700623870), KQU( 8055827661392944028), + KQU( 5865092976832712268), KQU(15159212508053625143), + KQU( 3560964582876483341), KQU( 4070052741344438280), + KQU( 6032585709886855634), KQU(15643262320904604873), + KQU( 2565119772293371111), KQU( 318314293065348260), + KQU(15047458749141511872), KQU( 7772788389811528730), + KQU( 7081187494343801976), KQU( 6465136009467253947), + KQU(10425940692543362069), KQU( 554608190318339115), + KQU(14796699860302125214), KQU( 1638153134431111443), + KQU(10336967447052276248), KQU( 8412308070396592958), + KQU( 4004557277152051226), KQU( 8143598997278774834), + KQU(16413323996508783221), KQU(13139418758033994949), + KQU( 9772709138335006667), KQU( 2818167159287157659), + KQU(17091740573832523669), KQU(14629199013130751608), + KQU(18268322711500338185), KQU( 8290963415675493063), + KQU( 8830864907452542588), KQU( 1614839084637494849), + KQU(14855358500870422231), KQU( 3472996748392519937), + KQU(15317151166268877716), KQU( 5825895018698400362), + KQU(16730208429367544129), KQU(10481156578141202800), + KQU( 4746166512382823750), KQU(12720876014472464998), + KQU( 8825177124486735972), KQU(13733447296837467838), + KQU( 6412293741681359625), KQU( 8313213138756135033), + KQU(11421481194803712517), KQU( 7997007691544174032), + KQU( 6812963847917605930), KQU( 9683091901227558641), + KQU(14703594165860324713), KQU( 1775476144519618309), + KQU( 2724283288516469519), KQU( 717642555185856868), + KQU( 8736402192215092346), KQU(11878800336431381021), + KQU( 4348816066017061293), KQU( 6115112756583631307), + KQU( 9176597239667142976), KQU(12615622714894259204), + KQU(10283406711301385987), KQU( 5111762509485379420), + KQU( 3118290051198688449), KQU( 7345123071632232145), + KQU( 9176423451688682359), KQU( 4843865456157868971), + KQU(12008036363752566088), KQU(12058837181919397720), + KQU( 2145073958457347366), KQU( 1526504881672818067), + KQU( 3488830105567134848), KQU(13208362960674805143), + KQU( 4077549672899572192), KQU( 7770995684693818365), + KQU( 1398532341546313593), KQU(12711859908703927840), + KQU( 1417561172594446813), KQU(17045191024194170604), + KQU( 4101933177604931713), KQU(14708428834203480320), + KQU(17447509264469407724), KQU(14314821973983434255), + KQU(17990472271061617265), KQU( 5087756685841673942), + KQU(12797820586893859939), KQU( 1778128952671092879), + KQU( 3535918530508665898), KQU( 9035729701042481301), + KQU(14808661568277079962), KQU(14587345077537747914), + KQU(11920080002323122708), KQU( 6426515805197278753), + KQU( 3295612216725984831), KQU(11040722532100876120), + KQU(12305952936387598754), KQU(16097391899742004253), + KQU( 4908537335606182208), KQU(12446674552196795504), + KQU(16010497855816895177), KQU( 9194378874788615551), + KQU( 3382957529567613384), KQU( 5154647600754974077), + KQU( 9801822865328396141), KQU( 9023662173919288143), + KQU(17623115353825147868), KQU( 8238115767443015816), + KQU(15811444159859002560), KQU( 9085612528904059661), + KQU( 6888601089398614254), KQU( 258252992894160189), + KQU( 6704363880792428622), KQU( 6114966032147235763), + KQU(11075393882690261875), KQU( 8797664238933620407), + KQU( 5901892006476726920), KQU( 5309780159285518958), + KQU(14940808387240817367), KQU(14642032021449656698), + KQU( 9808256672068504139), KQU( 3670135111380607658), + KQU(11211211097845960152), KQU( 1474304506716695808), + KQU(15843166204506876239), KQU( 7661051252471780561), + KQU(10170905502249418476), KQU( 
7801416045582028589), + KQU( 2763981484737053050), KQU( 9491377905499253054), + KQU(16201395896336915095), KQU( 9256513756442782198), + KQU( 5411283157972456034), KQU( 5059433122288321676), + KQU( 4327408006721123357), KQU( 9278544078834433377), + KQU( 7601527110882281612), KQU(11848295896975505251), + KQU(12096998801094735560), KQU(14773480339823506413), + KQU(15586227433895802149), KQU(12786541257830242872), + KQU( 6904692985140503067), KQU( 5309011515263103959), + KQU(12105257191179371066), KQU(14654380212442225037), + KQU( 2556774974190695009), KQU( 4461297399927600261), + KQU(14888225660915118646), KQU(14915459341148291824), + KQU( 2738802166252327631), KQU( 6047155789239131512), + KQU(12920545353217010338), KQU(10697617257007840205), + KQU( 2751585253158203504), KQU(13252729159780047496), + KQU(14700326134672815469), KQU(14082527904374600529), + KQU(16852962273496542070), KQU(17446675504235853907), + KQU(15019600398527572311), KQU(12312781346344081551), + KQU(14524667935039810450), KQU( 5634005663377195738), + KQU(11375574739525000569), KQU( 2423665396433260040), + KQU( 5222836914796015410), KQU( 4397666386492647387), + KQU( 4619294441691707638), KQU( 665088602354770716), + KQU(13246495665281593610), KQU( 6564144270549729409), + KQU(10223216188145661688), KQU( 3961556907299230585), + KQU(11543262515492439914), KQU(16118031437285993790), + KQU( 7143417964520166465), KQU(13295053515909486772), + KQU( 40434666004899675), KQU(17127804194038347164), + KQU( 8599165966560586269), KQU( 8214016749011284903), + KQU(13725130352140465239), KQU( 5467254474431726291), + KQU( 7748584297438219877), KQU(16933551114829772472), + KQU( 2169618439506799400), KQU( 2169787627665113463), + KQU(17314493571267943764), KQU(18053575102911354912), + KQU(11928303275378476973), KQU(11593850925061715550), + KQU(17782269923473589362), KQU( 3280235307704747039), + KQU( 6145343578598685149), KQU(17080117031114086090), + KQU(18066839902983594755), KQU( 6517508430331020706), + KQU( 8092908893950411541), KQU(12558378233386153732), + KQU( 4476532167973132976), KQU(16081642430367025016), + KQU( 4233154094369139361), KQU( 8693630486693161027), + KQU(11244959343027742285), KQU(12273503967768513508), + KQU(14108978636385284876), KQU( 7242414665378826984), + KQU( 6561316938846562432), KQU( 8601038474994665795), + KQU(17532942353612365904), KQU(17940076637020912186), + KQU( 7340260368823171304), KQU( 7061807613916067905), + KQU(10561734935039519326), KQU(17990796503724650862), + KQU( 6208732943911827159), KQU( 359077562804090617), + KQU(14177751537784403113), KQU(10659599444915362902), + KQU(15081727220615085833), KQU(13417573895659757486), + KQU(15513842342017811524), KQU(11814141516204288231), + KQU( 1827312513875101814), KQU( 2804611699894603103), + KQU(17116500469975602763), KQU(12270191815211952087), + KQU(12256358467786024988), KQU(18435021722453971267), + KQU( 671330264390865618), KQU( 476504300460286050), + KQU(16465470901027093441), KQU( 4047724406247136402), + KQU( 1322305451411883346), KQU( 1388308688834322280), + KQU( 7303989085269758176), KQU( 9323792664765233642), + KQU( 4542762575316368936), KQU(17342696132794337618), + KQU( 4588025054768498379), KQU(13415475057390330804), + KQU(17880279491733405570), KQU(10610553400618620353), + KQU( 3180842072658960139), KQU(13002966655454270120), + KQU( 1665301181064982826), KQU( 7083673946791258979), + KQU( 190522247122496820), KQU(17388280237250677740), + KQU( 8430770379923642945), KQU(12987180971921668584), + KQU( 2311086108365390642), KQU( 2870984383579822345), + 
KQU(14014682609164653318), KQU(14467187293062251484), + KQU( 192186361147413298), KQU(15171951713531796524), + KQU( 9900305495015948728), KQU(17958004775615466344), + KQU(14346380954498606514), KQU(18040047357617407096), + KQU( 5035237584833424532), KQU(15089555460613972287), + KQU( 4131411873749729831), KQU( 1329013581168250330), + KQU(10095353333051193949), KQU(10749518561022462716), + KQU( 9050611429810755847), KQU(15022028840236655649), + KQU( 8775554279239748298), KQU(13105754025489230502), + KQU(15471300118574167585), KQU( 89864764002355628), + KQU( 8776416323420466637), KQU( 5280258630612040891), + KQU( 2719174488591862912), KQU( 7599309137399661994), + KQU(15012887256778039979), KQU(14062981725630928925), + KQU(12038536286991689603), KQU( 7089756544681775245), + KQU(10376661532744718039), KQU( 1265198725901533130), + KQU(13807996727081142408), KQU( 2935019626765036403), + KQU( 7651672460680700141), KQU( 3644093016200370795), + KQU( 2840982578090080674), KQU(17956262740157449201), + KQU(18267979450492880548), KQU(11799503659796848070), + KQU( 9942537025669672388), KQU(11886606816406990297), + KQU( 5488594946437447576), KQU( 7226714353282744302), + KQU( 3784851653123877043), KQU( 878018453244803041), + KQU(12110022586268616085), KQU( 734072179404675123), + KQU(11869573627998248542), KQU( 469150421297783998), + KQU( 260151124912803804), KQU(11639179410120968649), + KQU( 9318165193840846253), KQU(12795671722734758075), + KQU(15318410297267253933), KQU( 691524703570062620), + KQU( 5837129010576994601), KQU(15045963859726941052), + KQU( 5850056944932238169), KQU(12017434144750943807), + KQU( 7447139064928956574), KQU( 3101711812658245019), + KQU(16052940704474982954), KQU(18195745945986994042), + KQU( 8932252132785575659), KQU(13390817488106794834), + KQU(11582771836502517453), KQU( 4964411326683611686), + KQU( 2195093981702694011), KQU(14145229538389675669), + KQU(16459605532062271798), KQU( 866316924816482864), + KQU( 4593041209937286377), KQU( 8415491391910972138), + KQU( 4171236715600528969), KQU(16637569303336782889), + KQU( 2002011073439212680), KQU(17695124661097601411), + KQU( 4627687053598611702), KQU( 7895831936020190403), + KQU( 8455951300917267802), KQU( 2923861649108534854), + KQU( 8344557563927786255), KQU( 6408671940373352556), + KQU(12210227354536675772), KQU(14294804157294222295), + KQU(10103022425071085127), KQU(10092959489504123771), + KQU( 6554774405376736268), KQU(12629917718410641774), + KQU( 6260933257596067126), KQU( 2460827021439369673), + KQU( 2541962996717103668), KQU( 597377203127351475), + KQU( 5316984203117315309), KQU( 4811211393563241961), + KQU(13119698597255811641), KQU( 8048691512862388981), + KQU(10216818971194073842), KQU( 4612229970165291764), + KQU(10000980798419974770), KQU( 6877640812402540687), + KQU( 1488727563290436992), KQU( 2227774069895697318), + KQU(11237754507523316593), KQU(13478948605382290972), + KQU( 1963583846976858124), KQU( 5512309205269276457), + KQU( 3972770164717652347), KQU( 3841751276198975037), + KQU(10283343042181903117), KQU( 8564001259792872199), + KQU(16472187244722489221), KQU( 8953493499268945921), + KQU( 3518747340357279580), KQU( 4003157546223963073), + KQU( 3270305958289814590), KQU( 3966704458129482496), + KQU( 8122141865926661939), KQU(14627734748099506653), + KQU(13064426990862560568), KQU( 2414079187889870829), + KQU( 5378461209354225306), KQU(10841985740128255566), + KQU( 538582442885401738), KQU( 7535089183482905946), + KQU(16117559957598879095), KQU( 8477890721414539741), + KQU( 1459127491209533386), 
KQU(17035126360733620462), + KQU( 8517668552872379126), KQU(10292151468337355014), + KQU(17081267732745344157), KQU(13751455337946087178), + KQU(14026945459523832966), KQU( 6653278775061723516), + KQU(10619085543856390441), KQU( 2196343631481122885), + KQU(10045966074702826136), KQU(10082317330452718282), + KQU( 5920859259504831242), KQU( 9951879073426540617), + KQU( 7074696649151414158), KQU(15808193543879464318), + KQU( 7385247772746953374), KQU( 3192003544283864292), + KQU(18153684490917593847), KQU(12423498260668568905), + KQU(10957758099756378169), KQU(11488762179911016040), + KQU( 2099931186465333782), KQU(11180979581250294432), + KQU( 8098916250668367933), KQU( 3529200436790763465), + KQU(12988418908674681745), KQU( 6147567275954808580), + KQU( 3207503344604030989), KQU(10761592604898615360), + KQU( 229854861031893504), KQU( 8809853962667144291), + KQU(13957364469005693860), KQU( 7634287665224495886), + KQU(12353487366976556874), KQU( 1134423796317152034), + KQU( 2088992471334107068), KQU( 7393372127190799698), + KQU( 1845367839871058391), KQU( 207922563987322884), + KQU(11960870813159944976), KQU(12182120053317317363), + KQU(17307358132571709283), KQU(13871081155552824936), + KQU(18304446751741566262), KQU( 7178705220184302849), + KQU(10929605677758824425), KQU(16446976977835806844), + KQU(13723874412159769044), KQU( 6942854352100915216), + KQU( 1726308474365729390), KQU( 2150078766445323155), + KQU(15345558947919656626), KQU(12145453828874527201), + KQU( 2054448620739726849), KQU( 2740102003352628137), + KQU(11294462163577610655), KQU( 756164283387413743), + KQU(17841144758438810880), KQU(10802406021185415861), + KQU( 8716455530476737846), KQU( 6321788834517649606), + KQU(14681322910577468426), KQU(17330043563884336387), + KQU(12701802180050071614), KQU(14695105111079727151), + KQU( 5112098511654172830), KQU( 4957505496794139973), + KQU( 8270979451952045982), KQU(12307685939199120969), + KQU(12425799408953443032), KQU( 8376410143634796588), + KQU(16621778679680060464), KQU( 3580497854566660073), + KQU( 1122515747803382416), KQU( 857664980960597599), + KQU( 6343640119895925918), KQU(12878473260854462891), + KQU(10036813920765722626), KQU(14451335468363173812), + KQU( 5476809692401102807), KQU(16442255173514366342), + KQU(13060203194757167104), KQU(14354124071243177715), + KQU(15961249405696125227), KQU(13703893649690872584), + KQU( 363907326340340064), KQU( 6247455540491754842), + KQU(12242249332757832361), KQU( 156065475679796717), + KQU( 9351116235749732355), KQU( 4590350628677701405), + KQU( 1671195940982350389), KQU(13501398458898451905), + KQU( 6526341991225002255), KQU( 1689782913778157592), + KQU( 7439222350869010334), KQU(13975150263226478308), + KQU(11411961169932682710), KQU(17204271834833847277), + KQU( 541534742544435367), KQU( 6591191931218949684), + KQU( 2645454775478232486), KQU( 4322857481256485321), + KQU( 8477416487553065110), KQU(12902505428548435048), + KQU( 971445777981341415), KQU(14995104682744976712), + KQU( 4243341648807158063), KQU( 8695061252721927661), + KQU( 5028202003270177222), KQU( 2289257340915567840), + KQU(13870416345121866007), KQU(13994481698072092233), + KQU( 6912785400753196481), KQU( 2278309315841980139), + KQU( 4329765449648304839), KQU( 5963108095785485298), + KQU( 4880024847478722478), KQU(16015608779890240947), + KQU( 1866679034261393544), KQU( 914821179919731519), + KQU( 9643404035648760131), KQU( 2418114953615593915), + KQU( 944756836073702374), KQU(15186388048737296834), + KQU( 7723355336128442206), KQU( 7500747479679599691), + 
KQU(18013961306453293634), KQU( 2315274808095756456), + KQU(13655308255424029566), KQU(17203800273561677098), + KQU( 1382158694422087756), KQU( 5090390250309588976), + KQU( 517170818384213989), KQU( 1612709252627729621), + KQU( 1330118955572449606), KQU( 300922478056709885), + KQU(18115693291289091987), KQU(13491407109725238321), + KQU(15293714633593827320), KQU( 5151539373053314504), + KQU( 5951523243743139207), KQU(14459112015249527975), + KQU( 5456113959000700739), KQU( 3877918438464873016), + KQU(12534071654260163555), KQU(15871678376893555041), + KQU(11005484805712025549), KQU(16353066973143374252), + KQU( 4358331472063256685), KQU( 8268349332210859288), + KQU(12485161590939658075), KQU(13955993592854471343), + KQU( 5911446886848367039), KQU(14925834086813706974), + KQU( 6590362597857994805), KQU( 1280544923533661875), + KQU( 1637756018947988164), KQU( 4734090064512686329), + KQU(16693705263131485912), KQU( 6834882340494360958), + KQU( 8120732176159658505), KQU( 2244371958905329346), + KQU(10447499707729734021), KQU( 7318742361446942194), + KQU( 8032857516355555296), KQU(14023605983059313116), + KQU( 1032336061815461376), KQU( 9840995337876562612), + KQU( 9869256223029203587), KQU(12227975697177267636), + KQU(12728115115844186033), KQU( 7752058479783205470), + KQU( 729733219713393087), KQU(12954017801239007622) +}; +static const uint64_t init_by_array_64_expected[] = { + KQU( 2100341266307895239), KQU( 8344256300489757943), + KQU(15687933285484243894), KQU( 8268620370277076319), + KQU(12371852309826545459), KQU( 8800491541730110238), + KQU(18113268950100835773), KQU( 2886823658884438119), + KQU( 3293667307248180724), KQU( 9307928143300172731), + KQU( 7688082017574293629), KQU( 900986224735166665), + KQU( 9977972710722265039), KQU( 6008205004994830552), + KQU( 546909104521689292), KQU( 7428471521869107594), + KQU(14777563419314721179), KQU(16116143076567350053), + KQU( 5322685342003142329), KQU( 4200427048445863473), + KQU( 4693092150132559146), KQU(13671425863759338582), + KQU( 6747117460737639916), KQU( 4732666080236551150), + KQU( 5912839950611941263), KQU( 3903717554504704909), + KQU( 2615667650256786818), KQU(10844129913887006352), + KQU(13786467861810997820), KQU(14267853002994021570), + KQU(13767807302847237439), KQU(16407963253707224617), + KQU( 4802498363698583497), KQU( 2523802839317209764), + KQU( 3822579397797475589), KQU( 8950320572212130610), + KQU( 3745623504978342534), KQU(16092609066068482806), + KQU( 9817016950274642398), KQU(10591660660323829098), + KQU(11751606650792815920), KQU( 5122873818577122211), + KQU(17209553764913936624), KQU( 6249057709284380343), + KQU(15088791264695071830), KQU(15344673071709851930), + KQU( 4345751415293646084), KQU( 2542865750703067928), + KQU(13520525127852368784), KQU(18294188662880997241), + KQU( 3871781938044881523), KQU( 2873487268122812184), + KQU(15099676759482679005), KQU(15442599127239350490), + KQU( 6311893274367710888), KQU( 3286118760484672933), + KQU( 4146067961333542189), KQU(13303942567897208770), + KQU( 8196013722255630418), KQU( 4437815439340979989), + KQU(15433791533450605135), KQU( 4254828956815687049), + KQU( 1310903207708286015), KQU(10529182764462398549), + KQU(14900231311660638810), KQU( 9727017277104609793), + KQU( 1821308310948199033), KQU(11628861435066772084), + KQU( 9469019138491546924), KQU( 3145812670532604988), + KQU( 9938468915045491919), KQU( 1562447430672662142), + KQU(13963995266697989134), KQU( 3356884357625028695), + KQU( 4499850304584309747), KQU( 8456825817023658122), + KQU(10859039922814285279), 
KQU( 8099512337972526555), + KQU( 348006375109672149), KQU(11919893998241688603), + KQU( 1104199577402948826), KQU(16689191854356060289), + KQU(10992552041730168078), KQU( 7243733172705465836), + KQU( 5668075606180319560), KQU(18182847037333286970), + KQU( 4290215357664631322), KQU( 4061414220791828613), + KQU(13006291061652989604), KQU( 7140491178917128798), + KQU(12703446217663283481), KQU( 5500220597564558267), + KQU(10330551509971296358), KQU(15958554768648714492), + KQU( 5174555954515360045), KQU( 1731318837687577735), + KQU( 3557700801048354857), KQU(13764012341928616198), + KQU(13115166194379119043), KQU( 7989321021560255519), + KQU( 2103584280905877040), KQU( 9230788662155228488), + KQU(16396629323325547654), KQU( 657926409811318051), + KQU(15046700264391400727), KQU( 5120132858771880830), + KQU( 7934160097989028561), KQU( 6963121488531976245), + KQU(17412329602621742089), KQU(15144843053931774092), + KQU(17204176651763054532), KQU(13166595387554065870), + KQU( 8590377810513960213), KQU( 5834365135373991938), + KQU( 7640913007182226243), KQU( 3479394703859418425), + KQU(16402784452644521040), KQU( 4993979809687083980), + KQU(13254522168097688865), KQU(15643659095244365219), + KQU( 5881437660538424982), KQU(11174892200618987379), + KQU( 254409966159711077), KQU(17158413043140549909), + KQU( 3638048789290376272), KQU( 1376816930299489190), + KQU( 4622462095217761923), KQU(15086407973010263515), + KQU(13253971772784692238), KQU( 5270549043541649236), + KQU(11182714186805411604), KQU(12283846437495577140), + KQU( 5297647149908953219), KQU(10047451738316836654), + KQU( 4938228100367874746), KQU(12328523025304077923), + KQU( 3601049438595312361), KQU( 9313624118352733770), + KQU(13322966086117661798), KQU(16660005705644029394), + KQU(11337677526988872373), KQU(13869299102574417795), + KQU(15642043183045645437), KQU( 3021755569085880019), + KQU( 4979741767761188161), KQU(13679979092079279587), + KQU( 3344685842861071743), KQU(13947960059899588104), + KQU( 305806934293368007), KQU( 5749173929201650029), + KQU(11123724852118844098), KQU(15128987688788879802), + KQU(15251651211024665009), KQU( 7689925933816577776), + KQU(16732804392695859449), KQU(17087345401014078468), + KQU(14315108589159048871), KQU( 4820700266619778917), + KQU(16709637539357958441), KQU( 4936227875177351374), + KQU( 2137907697912987247), KQU(11628565601408395420), + KQU( 2333250549241556786), KQU( 5711200379577778637), + KQU( 5170680131529031729), KQU(12620392043061335164), + KQU( 95363390101096078), KQU( 5487981914081709462), + KQU( 1763109823981838620), KQU( 3395861271473224396), + KQU( 1300496844282213595), KQU( 6894316212820232902), + KQU(10673859651135576674), KQU( 5911839658857903252), + KQU(17407110743387299102), KQU( 8257427154623140385), + KQU(11389003026741800267), KQU( 4070043211095013717), + KQU(11663806997145259025), KQU(15265598950648798210), + KQU( 630585789434030934), KQU( 3524446529213587334), + KQU( 7186424168495184211), KQU(10806585451386379021), + KQU(11120017753500499273), KQU( 1586837651387701301), + KQU(17530454400954415544), KQU( 9991670045077880430), + KQU( 7550997268990730180), KQU( 8640249196597379304), + KQU( 3522203892786893823), KQU(10401116549878854788), + KQU(13690285544733124852), KQU( 8295785675455774586), + KQU(15535716172155117603), KQU( 3112108583723722511), + KQU(17633179955339271113), KQU(18154208056063759375), + KQU( 1866409236285815666), KQU(13326075895396412882), + KQU( 8756261842948020025), KQU( 6281852999868439131), + KQU(15087653361275292858), KQU(10333923911152949397), + KQU( 
5265567645757408500), KQU(12728041843210352184), + KQU( 6347959327507828759), KQU( 154112802625564758), + KQU(18235228308679780218), KQU( 3253805274673352418), + KQU( 4849171610689031197), KQU(17948529398340432518), + KQU(13803510475637409167), KQU(13506570190409883095), + KQU(15870801273282960805), KQU( 8451286481299170773), + KQU( 9562190620034457541), KQU( 8518905387449138364), + KQU(12681306401363385655), KQU( 3788073690559762558), + KQU( 5256820289573487769), KQU( 2752021372314875467), + KQU( 6354035166862520716), KQU( 4328956378309739069), + KQU( 449087441228269600), KQU( 5533508742653090868), + KQU( 1260389420404746988), KQU(18175394473289055097), + KQU( 1535467109660399420), KQU( 8818894282874061442), + KQU(12140873243824811213), KQU(15031386653823014946), + KQU( 1286028221456149232), KQU( 6329608889367858784), + KQU( 9419654354945132725), KQU( 6094576547061672379), + KQU(17706217251847450255), KQU( 1733495073065878126), + KQU(16918923754607552663), KQU( 8881949849954945044), + KQU(12938977706896313891), KQU(14043628638299793407), + KQU(18393874581723718233), KQU( 6886318534846892044), + KQU(14577870878038334081), KQU(13541558383439414119), + KQU(13570472158807588273), KQU(18300760537910283361), + KQU( 818368572800609205), KQU( 1417000585112573219), + KQU(12337533143867683655), KQU(12433180994702314480), + KQU( 778190005829189083), KQU(13667356216206524711), + KQU( 9866149895295225230), KQU(11043240490417111999), + KQU( 1123933826541378598), KQU( 6469631933605123610), + KQU(14508554074431980040), KQU(13918931242962026714), + KQU( 2870785929342348285), KQU(14786362626740736974), + KQU(13176680060902695786), KQU( 9591778613541679456), + KQU( 9097662885117436706), KQU( 749262234240924947), + KQU( 1944844067793307093), KQU( 4339214904577487742), + KQU( 8009584152961946551), KQU(16073159501225501777), + KQU( 3335870590499306217), KQU(17088312653151202847), + KQU( 3108893142681931848), KQU(16636841767202792021), + KQU(10423316431118400637), KQU( 8008357368674443506), + KQU(11340015231914677875), KQU(17687896501594936090), + KQU(15173627921763199958), KQU( 542569482243721959), + KQU(15071714982769812975), KQU( 4466624872151386956), + KQU( 1901780715602332461), KQU( 9822227742154351098), + KQU( 1479332892928648780), KQU( 6981611948382474400), + KQU( 7620824924456077376), KQU(14095973329429406782), + KQU( 7902744005696185404), KQU(15830577219375036920), + KQU(10287076667317764416), KQU(12334872764071724025), + KQU( 4419302088133544331), KQU(14455842851266090520), + KQU(12488077416504654222), KQU( 7953892017701886766), + KQU( 6331484925529519007), KQU( 4902145853785030022), + KQU(17010159216096443073), KQU(11945354668653886087), + KQU(15112022728645230829), KQU(17363484484522986742), + KQU( 4423497825896692887), KQU( 8155489510809067471), + KQU( 258966605622576285), KQU( 5462958075742020534), + KQU( 6763710214913276228), KQU( 2368935183451109054), + KQU(14209506165246453811), KQU( 2646257040978514881), + KQU( 3776001911922207672), KQU( 1419304601390147631), + KQU(14987366598022458284), KQU( 3977770701065815721), + KQU( 730820417451838898), KQU( 3982991703612885327), + KQU( 2803544519671388477), KQU(17067667221114424649), + KQU( 2922555119737867166), KQU( 1989477584121460932), + KQU(15020387605892337354), KQU( 9293277796427533547), + KQU(10722181424063557247), KQU(16704542332047511651), + KQU( 5008286236142089514), KQU(16174732308747382540), + KQU(17597019485798338402), KQU(13081745199110622093), + KQU( 8850305883842258115), KQU(12723629125624589005), + KQU( 8140566453402805978), 
KQU(15356684607680935061), + KQU(14222190387342648650), KQU(11134610460665975178), + KQU( 1259799058620984266), KQU(13281656268025610041), + KQU( 298262561068153992), KQU(12277871700239212922), + KQU(13911297774719779438), KQU(16556727962761474934), + KQU(17903010316654728010), KQU( 9682617699648434744), + KQU(14757681836838592850), KQU( 1327242446558524473), + KQU(11126645098780572792), KQU( 1883602329313221774), + KQU( 2543897783922776873), KQU(15029168513767772842), + KQU(12710270651039129878), KQU(16118202956069604504), + KQU(15010759372168680524), KQU( 2296827082251923948), + KQU(10793729742623518101), KQU(13829764151845413046), + KQU(17769301223184451213), KQU( 3118268169210783372), + KQU(17626204544105123127), KQU( 7416718488974352644), + KQU(10450751996212925994), KQU( 9352529519128770586), + KQU( 259347569641110140), KQU( 8048588892269692697), + KQU( 1774414152306494058), KQU(10669548347214355622), + KQU(13061992253816795081), KQU(18432677803063861659), + KQU( 8879191055593984333), KQU(12433753195199268041), + KQU(14919392415439730602), KQU( 6612848378595332963), + KQU( 6320986812036143628), KQU(10465592420226092859), + KQU( 4196009278962570808), KQU( 3747816564473572224), + KQU(17941203486133732898), KQU( 2350310037040505198), + KQU( 5811779859134370113), KQU(10492109599506195126), + KQU( 7699650690179541274), KQU( 1954338494306022961), + KQU(14095816969027231152), KQU( 5841346919964852061), + KQU(14945969510148214735), KQU( 3680200305887550992), + KQU( 6218047466131695792), KQU( 8242165745175775096), + KQU(11021371934053307357), KQU( 1265099502753169797), + KQU( 4644347436111321718), KQU( 3609296916782832859), + KQU( 8109807992218521571), KQU(18387884215648662020), + KQU(14656324896296392902), KQU(17386819091238216751), + KQU(17788300878582317152), KQU( 7919446259742399591), + KQU( 4466613134576358004), KQU(12928181023667938509), + KQU(13147446154454932030), KQU(16552129038252734620), + KQU( 8395299403738822450), KQU(11313817655275361164), + KQU( 434258809499511718), KQU( 2074882104954788676), + KQU( 7929892178759395518), KQU( 9006461629105745388), + KQU( 5176475650000323086), KQU(11128357033468341069), + KQU(12026158851559118955), KQU(14699716249471156500), + KQU( 448982497120206757), KQU( 4156475356685519900), + KQU( 6063816103417215727), KQU(10073289387954971479), + KQU( 8174466846138590962), KQU( 2675777452363449006), + KQU( 9090685420572474281), KQU( 6659652652765562060), + KQU(12923120304018106621), KQU(11117480560334526775), + KQU( 937910473424587511), KQU( 1838692113502346645), + KQU(11133914074648726180), KQU( 7922600945143884053), + KQU(13435287702700959550), KQU( 5287964921251123332), + KQU(11354875374575318947), KQU(17955724760748238133), + KQU(13728617396297106512), KQU( 4107449660118101255), + KQU( 1210269794886589623), KQU(11408687205733456282), + KQU( 4538354710392677887), KQU(13566803319341319267), + KQU(17870798107734050771), KQU( 3354318982568089135), + KQU( 9034450839405133651), KQU(13087431795753424314), + KQU( 950333102820688239), KQU( 1968360654535604116), + KQU(16840551645563314995), KQU( 8867501803892924995), + KQU(11395388644490626845), KQU( 1529815836300732204), + KQU(13330848522996608842), KQU( 1813432878817504265), + KQU( 2336867432693429560), KQU(15192805445973385902), + KQU( 2528593071076407877), KQU( 128459777936689248), + KQU( 9976345382867214866), KQU( 6208885766767996043), + KQU(14982349522273141706), KQU( 3099654362410737822), + KQU(13776700761947297661), KQU( 8806185470684925550), + KQU( 8151717890410585321), KQU( 640860591588072925), + 
KQU(14592096303937307465), KQU( 9056472419613564846), + KQU(14861544647742266352), KQU(12703771500398470216), + KQU( 3142372800384138465), KQU( 6201105606917248196), + KQU(18337516409359270184), KQU(15042268695665115339), + KQU(15188246541383283846), KQU(12800028693090114519), + KQU( 5992859621101493472), KQU(18278043971816803521), + KQU( 9002773075219424560), KQU( 7325707116943598353), + KQU( 7930571931248040822), KQU( 5645275869617023448), + KQU( 7266107455295958487), KQU( 4363664528273524411), + KQU(14313875763787479809), KQU(17059695613553486802), + KQU( 9247761425889940932), KQU(13704726459237593128), + KQU( 2701312427328909832), KQU(17235532008287243115), + KQU(14093147761491729538), KQU( 6247352273768386516), + KQU( 8268710048153268415), KQU( 7985295214477182083), + KQU(15624495190888896807), KQU( 3772753430045262788), + KQU( 9133991620474991698), KQU( 5665791943316256028), + KQU( 7551996832462193473), KQU(13163729206798953877), + KQU( 9263532074153846374), KQU( 1015460703698618353), + KQU(17929874696989519390), KQU(18257884721466153847), + KQU(16271867543011222991), KQU( 3905971519021791941), + KQU(16814488397137052085), KQU( 1321197685504621613), + KQU( 2870359191894002181), KQU(14317282970323395450), + KQU(13663920845511074366), KQU( 2052463995796539594), + KQU(14126345686431444337), KQU( 1727572121947022534), + KQU(17793552254485594241), KQU( 6738857418849205750), + KQU( 1282987123157442952), KQU(16655480021581159251), + KQU( 6784587032080183866), KQU(14726758805359965162), + KQU( 7577995933961987349), KQU(12539609320311114036), + KQU(10789773033385439494), KQU( 8517001497411158227), + KQU(10075543932136339710), KQU(14838152340938811081), + KQU( 9560840631794044194), KQU(17445736541454117475), + KQU(10633026464336393186), KQU(15705729708242246293), + KQU( 1117517596891411098), KQU( 4305657943415886942), + KQU( 4948856840533979263), KQU(16071681989041789593), + KQU(13723031429272486527), KQU( 7639567622306509462), + KQU(12670424537483090390), KQU( 9715223453097197134), + KQU( 5457173389992686394), KQU( 289857129276135145), + KQU(17048610270521972512), KQU( 692768013309835485), + KQU(14823232360546632057), KQU(18218002361317895936), + KQU( 3281724260212650204), KQU(16453957266549513795), + KQU( 8592711109774511881), KQU( 929825123473369579), + KQU(15966784769764367791), KQU( 9627344291450607588), + KQU(10849555504977813287), KQU( 9234566913936339275), + KQU( 6413807690366911210), KQU(10862389016184219267), + KQU(13842504799335374048), KQU( 1531994113376881174), + KQU( 2081314867544364459), KQU(16430628791616959932), + KQU( 8314714038654394368), KQU( 9155473892098431813), + KQU(12577843786670475704), KQU( 4399161106452401017), + KQU( 1668083091682623186), KQU( 1741383777203714216), + KQU( 2162597285417794374), KQU(15841980159165218736), + KQU( 1971354603551467079), KQU( 1206714764913205968), + KQU( 4790860439591272330), KQU(14699375615594055799), + KQU( 8374423871657449988), KQU(10950685736472937738), + KQU( 697344331343267176), KQU(10084998763118059810), + KQU(12897369539795983124), KQU(12351260292144383605), + KQU( 1268810970176811234), KQU( 7406287800414582768), + KQU( 516169557043807831), KQU( 5077568278710520380), + KQU( 3828791738309039304), KQU( 7721974069946943610), + KQU( 3534670260981096460), KQU( 4865792189600584891), + KQU(16892578493734337298), KQU( 9161499464278042590), + KQU(11976149624067055931), KQU(13219479887277343990), + KQU(14161556738111500680), KQU(14670715255011223056), + KQU( 4671205678403576558), KQU(12633022931454259781), + KQU(14821376219869187646), KQU( 
751181776484317028), + KQU( 2192211308839047070), KQU(11787306362361245189), + KQU(10672375120744095707), KQU( 4601972328345244467), + KQU(15457217788831125879), KQU( 8464345256775460809), + KQU(10191938789487159478), KQU( 6184348739615197613), + KQU(11425436778806882100), KQU( 2739227089124319793), + KQU( 461464518456000551), KQU( 4689850170029177442), + KQU( 6120307814374078625), KQU(11153579230681708671), + KQU( 7891721473905347926), KQU(10281646937824872400), + KQU( 3026099648191332248), KQU( 8666750296953273818), + KQU(14978499698844363232), KQU(13303395102890132065), + KQU( 8182358205292864080), KQU(10560547713972971291), + KQU(11981635489418959093), KQU( 3134621354935288409), + KQU(11580681977404383968), KQU(14205530317404088650), + KQU( 5997789011854923157), KQU(13659151593432238041), + KQU(11664332114338865086), KQU( 7490351383220929386), + KQU( 7189290499881530378), KQU(15039262734271020220), + KQU( 2057217285976980055), KQU( 555570804905355739), + KQU(11235311968348555110), KQU(13824557146269603217), + KQU(16906788840653099693), KQU( 7222878245455661677), + KQU( 5245139444332423756), KQU( 4723748462805674292), + KQU(12216509815698568612), KQU(17402362976648951187), + KQU(17389614836810366768), KQU( 4880936484146667711), + KQU( 9085007839292639880), KQU(13837353458498535449), + KQU(11914419854360366677), KQU(16595890135313864103), + KQU( 6313969847197627222), KQU(18296909792163910431), + KQU(10041780113382084042), KQU( 2499478551172884794), + KQU(11057894246241189489), KQU( 9742243032389068555), + KQU(12838934582673196228), KQU(13437023235248490367), + KQU(13372420669446163240), KQU( 6752564244716909224), + KQU( 7157333073400313737), KQU(12230281516370654308), + KQU( 1182884552219419117), KQU( 2955125381312499218), + KQU(10308827097079443249), KQU( 1337648572986534958), + KQU(16378788590020343939), KQU( 108619126514420935), + KQU( 3990981009621629188), KQU( 5460953070230946410), + KQU( 9703328329366531883), KQU(13166631489188077236), + KQU( 1104768831213675170), KQU( 3447930458553877908), + KQU( 8067172487769945676), KQU( 5445802098190775347), + KQU( 3244840981648973873), KQU(17314668322981950060), + KQU( 5006812527827763807), KQU(18158695070225526260), + KQU( 2824536478852417853), KQU(13974775809127519886), + KQU( 9814362769074067392), KQU(17276205156374862128), + KQU(11361680725379306967), KQU( 3422581970382012542), + KQU(11003189603753241266), KQU(11194292945277862261), + KQU( 6839623313908521348), KQU(11935326462707324634), + KQU( 1611456788685878444), KQU(13112620989475558907), + KQU( 517659108904450427), KQU(13558114318574407624), + KQU(15699089742731633077), KQU( 4988979278862685458), + KQU( 8111373583056521297), KQU( 3891258746615399627), + KQU( 8137298251469718086), KQU(12748663295624701649), + KQU( 4389835683495292062), KQU( 5775217872128831729), + KQU( 9462091896405534927), KQU( 8498124108820263989), + KQU( 8059131278842839525), KQU(10503167994254090892), + KQU(11613153541070396656), KQU(18069248738504647790), + KQU( 570657419109768508), KQU( 3950574167771159665), + KQU( 5514655599604313077), KQU( 2908460854428484165), + KQU(10777722615935663114), KQU(12007363304839279486), + KQU( 9800646187569484767), KQU( 8795423564889864287), + KQU(14257396680131028419), KQU( 6405465117315096498), + KQU( 7939411072208774878), KQU(17577572378528990006), + KQU(14785873806715994850), KQU(16770572680854747390), + KQU(18127549474419396481), KQU(11637013449455757750), + KQU(14371851933996761086), KQU( 3601181063650110280), + KQU( 4126442845019316144), KQU(10198287239244320669), + 
KQU(18000169628555379659), KQU(18392482400739978269), + KQU( 6219919037686919957), KQU( 3610085377719446052), + KQU( 2513925039981776336), KQU(16679413537926716955), + KQU(12903302131714909434), KQU( 5581145789762985009), + KQU(12325955044293303233), KQU(17216111180742141204), + KQU( 6321919595276545740), KQU( 3507521147216174501), + KQU( 9659194593319481840), KQU(11473976005975358326), + KQU(14742730101435987026), KQU( 492845897709954780), + KQU(16976371186162599676), KQU(17712703422837648655), + KQU( 9881254778587061697), KQU( 8413223156302299551), + KQU( 1563841828254089168), KQU( 9996032758786671975), + KQU( 138877700583772667), KQU(13003043368574995989), + KQU( 4390573668650456587), KQU( 8610287390568126755), + KQU(15126904974266642199), KQU( 6703637238986057662), + KQU( 2873075592956810157), KQU( 6035080933946049418), + KQU(13382846581202353014), KQU( 7303971031814642463), + KQU(18418024405307444267), KQU( 5847096731675404647), + KQU( 4035880699639842500), KQU(11525348625112218478), + KQU( 3041162365459574102), KQU( 2604734487727986558), + KQU(15526341771636983145), KQU(14556052310697370254), + KQU(12997787077930808155), KQU( 9601806501755554499), + KQU(11349677952521423389), KQU(14956777807644899350), + KQU(16559736957742852721), KQU(12360828274778140726), + KQU( 6685373272009662513), KQU(16932258748055324130), + KQU(15918051131954158508), KQU( 1692312913140790144), + KQU( 546653826801637367), KQU( 5341587076045986652), + KQU(14975057236342585662), KQU(12374976357340622412), + KQU(10328833995181940552), KQU(12831807101710443149), + KQU(10548514914382545716), KQU( 2217806727199715993), + KQU(12627067369242845138), KQU( 4598965364035438158), + KQU( 150923352751318171), KQU(14274109544442257283), + KQU( 4696661475093863031), KQU( 1505764114384654516), + KQU(10699185831891495147), KQU( 2392353847713620519), + KQU( 3652870166711788383), KQU( 8640653276221911108), + KQU( 3894077592275889704), KQU( 4918592872135964845), + KQU(16379121273281400789), KQU(12058465483591683656), + KQU(11250106829302924945), KQU( 1147537556296983005), + KQU( 6376342756004613268), KQU(14967128191709280506), + KQU(18007449949790627628), KQU( 9497178279316537841), + KQU( 7920174844809394893), KQU(10037752595255719907), + KQU(15875342784985217697), KQU(15311615921712850696), + KQU( 9552902652110992950), KQU(14054979450099721140), + KQU( 5998709773566417349), KQU(18027910339276320187), + KQU( 8223099053868585554), KQU( 7842270354824999767), + KQU( 4896315688770080292), KQU(12969320296569787895), + KQU( 2674321489185759961), KQU( 4053615936864718439), + KQU(11349775270588617578), KQU( 4743019256284553975), + KQU( 5602100217469723769), KQU(14398995691411527813), + KQU( 7412170493796825470), KQU( 836262406131744846), + KQU( 8231086633845153022), KQU( 5161377920438552287), + KQU( 8828731196169924949), KQU(16211142246465502680), + KQU( 3307990879253687818), KQU( 5193405406899782022), + KQU( 8510842117467566693), KQU( 6070955181022405365), + KQU(14482950231361409799), KQU(12585159371331138077), + KQU( 3511537678933588148), KQU( 2041849474531116417), + KQU(10944936685095345792), KQU(18303116923079107729), + KQU( 2720566371239725320), KQU( 4958672473562397622), + KQU( 3032326668253243412), KQU(13689418691726908338), + KQU( 1895205511728843996), KQU( 8146303515271990527), + KQU(16507343500056113480), KQU( 473996939105902919), + KQU( 9897686885246881481), KQU(14606433762712790575), + KQU( 6732796251605566368), KQU( 1399778120855368916), + KQU( 935023885182833777), KQU(16066282816186753477), + KQU( 7291270991820612055), 
KQU(17530230393129853844), + KQU(10223493623477451366), KQU(15841725630495676683), + KQU(17379567246435515824), KQU( 8588251429375561971), + KQU(18339511210887206423), KQU(17349587430725976100), + KQU(12244876521394838088), KQU( 6382187714147161259), + KQU(12335807181848950831), KQU(16948885622305460665), + KQU(13755097796371520506), KQU(14806740373324947801), + KQU( 4828699633859287703), KQU( 8209879281452301604), + KQU(12435716669553736437), KQU(13970976859588452131), + KQU( 6233960842566773148), KQU(12507096267900505759), + KQU( 1198713114381279421), KQU(14989862731124149015), + KQU(15932189508707978949), KQU( 2526406641432708722), + KQU( 29187427817271982), KQU( 1499802773054556353), + KQU(10816638187021897173), KQU( 5436139270839738132), + KQU( 6659882287036010082), KQU( 2154048955317173697), + KQU(10887317019333757642), KQU(16281091802634424955), + KQU(10754549879915384901), KQU(10760611745769249815), + KQU( 2161505946972504002), KQU( 5243132808986265107), + KQU(10129852179873415416), KQU( 710339480008649081), + KQU( 7802129453068808528), KQU(17967213567178907213), + KQU(15730859124668605599), KQU(13058356168962376502), + KQU( 3701224985413645909), KQU(14464065869149109264), + KQU( 9959272418844311646), KQU(10157426099515958752), + KQU(14013736814538268528), KQU(17797456992065653951), + KQU(17418878140257344806), KQU(15457429073540561521), + KQU( 2184426881360949378), KQU( 2062193041154712416), + KQU( 8553463347406931661), KQU( 4913057625202871854), + KQU( 2668943682126618425), KQU(17064444737891172288), + KQU( 4997115903913298637), KQU(12019402608892327416), + KQU(17603584559765897352), KQU(11367529582073647975), + KQU( 8211476043518436050), KQU( 8676849804070323674), + KQU(18431829230394475730), KQU(10490177861361247904), + KQU( 9508720602025651349), KQU( 7409627448555722700), + KQU( 5804047018862729008), KQU(11943858176893142594), + KQU(11908095418933847092), KQU( 5415449345715887652), + KQU( 1554022699166156407), KQU( 9073322106406017161), + KQU( 7080630967969047082), KQU(18049736940860732943), + KQU(12748714242594196794), KQU( 1226992415735156741), + KQU(17900981019609531193), KQU(11720739744008710999), + KQU( 3006400683394775434), KQU(11347974011751996028), + KQU( 3316999628257954608), KQU( 8384484563557639101), + KQU(18117794685961729767), KQU( 1900145025596618194), + KQU(17459527840632892676), KQU( 5634784101865710994), + KQU( 7918619300292897158), KQU( 3146577625026301350), + KQU( 9955212856499068767), KQU( 1873995843681746975), + KQU( 1561487759967972194), KQU( 8322718804375878474), + KQU(11300284215327028366), KQU( 4667391032508998982), + KQU( 9820104494306625580), KQU(17922397968599970610), + KQU( 1784690461886786712), KQU(14940365084341346821), + KQU( 5348719575594186181), KQU(10720419084507855261), + KQU(14210394354145143274), KQU( 2426468692164000131), + KQU(16271062114607059202), KQU(14851904092357070247), + KQU( 6524493015693121897), KQU( 9825473835127138531), + KQU(14222500616268569578), KQU(15521484052007487468), + KQU(14462579404124614699), KQU(11012375590820665520), + KQU(11625327350536084927), KQU(14452017765243785417), + KQU( 9989342263518766305), KQU( 3640105471101803790), + KQU( 4749866455897513242), KQU(13963064946736312044), + KQU(10007416591973223791), KQU(18314132234717431115), + KQU( 3286596588617483450), KQU( 7726163455370818765), + KQU( 7575454721115379328), KQU( 5308331576437663422), + KQU(18288821894903530934), KQU( 8028405805410554106), + KQU(15744019832103296628), KQU( 149765559630932100), + KQU( 6137705557200071977), KQU(14513416315434803615), + 
KQU(11665702820128984473), KQU( 218926670505601386), + KQU( 6868675028717769519), KQU(15282016569441512302), + KQU( 5707000497782960236), KQU( 6671120586555079567), + KQU( 2194098052618985448), KQU(16849577895477330978), + KQU(12957148471017466283), KQU( 1997805535404859393), + KQU( 1180721060263860490), KQU(13206391310193756958), + KQU(12980208674461861797), KQU( 3825967775058875366), + KQU(17543433670782042631), KQU( 1518339070120322730), + KQU(16344584340890991669), KQU( 2611327165318529819), + KQU(11265022723283422529), KQU( 4001552800373196817), + KQU(14509595890079346161), KQU( 3528717165416234562), + KQU(18153222571501914072), KQU( 9387182977209744425), + KQU(10064342315985580021), KQU(11373678413215253977), + KQU( 2308457853228798099), KQU( 9729042942839545302), + KQU( 7833785471140127746), KQU( 6351049900319844436), + KQU(14454610627133496067), KQU(12533175683634819111), + KQU(15570163926716513029), KQU(13356980519185762498) +}; + +TEST_BEGIN(test_gen_rand_32) { + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_gen_rand(1234); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_gen_rand(1234); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_gen_rand_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_32) { + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_by_array(ini, 4); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 4); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_by_array_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_gen_rand_64) { + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_gen_rand(4321); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_gen_rand(4321); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_gen_rand_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64", 
gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_64) { + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + uint32_t ini[] = {5, 4, 3, 2, 1}; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_by_array(ini, 5); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 5); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_by_array_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +int +main(void) { + return test( + test_gen_rand_32, + test_by_array_32, + test_gen_rand_64, + test_by_array_64); +} diff --git a/deps/jemalloc/test/unit/a0.c b/deps/jemalloc/test/unit/a0.c new file mode 100644 index 0000000..a27ab3f --- /dev/null +++ b/deps/jemalloc/test/unit/a0.c @@ -0,0 +1,16 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_a0) { + void *p; + + p = a0malloc(1); + assert_ptr_not_null(p, "Unexpected a0malloc() error"); + a0dalloc(p); +} +TEST_END + +int +main(void) { + return test_no_malloc_init( + test_a0); +} diff --git a/deps/jemalloc/test/unit/arena_reset.c b/deps/jemalloc/test/unit/arena_reset.c new file mode 100644 index 0000000..b182f31 --- /dev/null +++ b/deps/jemalloc/test/unit/arena_reset.c @@ -0,0 +1,349 @@ +#ifndef ARENA_RESET_PROF_C_ +#include "test/jemalloc_test.h" +#endif + +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/rtree.h" + +#include "test/extent_hooks.h" + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +/* Like ivsalloc(), but safe to call on discarded allocations. 
*/ +static size_t +vsalloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } + + if (extent == NULL) { + return 0; + } + if (extent_state_get(extent) != extent_state_active) { + return 0; + } + + if (szind == SC_NSIZES) { + return 0; + } + + return sz_index2size(szind); +} + +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { +#define NLARGE 32 + unsigned nsmall, nlarge, i; + size_t sz; + int flags; + tsdn_t *tsdn; + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + nsmall = get_nsmall(); + nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); + *nptrs = nsmall + nlarge; + *ptrs = (void **)malloc(*nptrs * sizeof(void *)); + assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); + + /* Allocate objects with a wide range of sizes. */ + for (i = 0; i < nsmall; i++) { + sz = get_small_size(i); + (*ptrs)[i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nlarge; i++) { + sz = get_large_size(i); + (*ptrs)[nsmall + i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + + tsdn = tsdn_fetch(); + + /* Verify allocations. */ + for (i = 0; i < *nptrs; i++) { + assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, + "Allocation should have queryable size"); + } +} + +static void +do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { + tsdn_t *tsdn; + unsigned i; + + tsdn = tsdn_fetch(); + + if (have_background_thread) { + malloc_mutex_lock(tsdn, + &background_thread_info_get(arena_ind)->mtx); + } + /* Verify allocations no longer exist. 
*/ + for (i = 0; i < nptrs; i++) { + assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, + "Allocation should no longer exist"); + } + if (have_background_thread) { + malloc_mutex_unlock(tsdn, + &background_thread_info_get(arena_ind)->mtx); + } + + free(ptrs); +} + +static void +do_arena_reset_destroy(const char *name, unsigned arena_ind) { + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib(name, mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static void +do_arena_reset(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.reset", arena_ind); +} + +static void +do_arena_destroy(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.destroy", arena_ind); +} + +TEST_BEGIN(test_arena_reset) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + do_arena_reset(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); +} +TEST_END + +static bool +arena_i_initialized(unsigned arena_ind, bool refresh) { + bool initialized; + size_t mib[3]; + size_t miblen, sz; + + if (refresh) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); + } + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, + 0), 0, "Unexpected mallctlbymib() failure"); + + return initialized; +} + +TEST_BEGIN(test_arena_destroy_initial) { + assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should not be initialized"); +} +TEST_END + +TEST_BEGIN(test_arena_destroy_hooks_default) { + unsigned arena_ind, arena_ind_another, arena_ind_prev; + void **ptrs; + unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + /* + * Create another arena before destroying one, to better verify arena + * index reuse. + */ + arena_ind_another = do_arena_create(NULL); + + do_arena_destroy(arena_ind); + + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + arena_ind_prev = arena_ind; + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + assert_u_eq(arena_ind, arena_ind_prev, + "Arena index should have been recycled"); + do_arena_destroy(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); + + do_arena_destroy(arena_ind_another); +} +TEST_END + +/* + * Actually unmap extents, regardless of opt_retain, so that attempts to access + * a destroyed arena's memory will segfault. 
+ */ +static bool +extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + did_dalloc = true; + if (!maps_coalesce && opt_retain) { + return true; + } + pages_unmap(addr, size); + return false; +} + +static extent_hooks_t hooks_orig; + +static extent_hooks_t hooks_unmap = { + extent_alloc_hook, + extent_dalloc_unmap, /* dalloc */ + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +TEST_BEGIN(test_arena_destroy_hooks_unmap) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + extent_hooks_prep(); + if (maps_coalesce) { + try_decommit = false; + } + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); + + did_alloc = false; + arena_ind = do_arena_create(&hooks); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_true(did_alloc, "Expected alloc"); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + did_dalloc = false; + do_arena_destroy(arena_ind); + assert_true(did_dalloc, "Expected dalloc"); + + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_arena_reset, + test_arena_destroy_initial, + test_arena_destroy_hooks_default, + test_arena_destroy_hooks_unmap); +} diff --git a/deps/jemalloc/test/unit/arena_reset_prof.c b/deps/jemalloc/test/unit/arena_reset_prof.c new file mode 100644 index 0000000..38d8012 --- /dev/null +++ b/deps/jemalloc/test/unit/arena_reset_prof.c @@ -0,0 +1,4 @@ +#include "test/jemalloc_test.h" +#define ARENA_RESET_PROF_C_ + +#include "arena_reset.c" diff --git a/deps/jemalloc/test/unit/arena_reset_prof.sh b/deps/jemalloc/test/unit/arena_reset_prof.sh new file mode 100644 index 0000000..041dc1c --- /dev/null +++ b/deps/jemalloc/test/unit/arena_reset_prof.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="prof:true,lg_prof_sample:0" diff --git a/deps/jemalloc/test/unit/atomic.c b/deps/jemalloc/test/unit/atomic.c new file mode 100644 index 0000000..572d8d2 --- /dev/null +++ b/deps/jemalloc/test/unit/atomic.c @@ -0,0 +1,229 @@ +#include "test/jemalloc_test.h" + +/* + * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for + * bool, etc. The one exception is that the short name for void * is "p" in + * some places and "ptr" in others. In the long run it would be nice to unify + * these, but in the short run we'll use this shim. + */ +#define assert_p_eq assert_ptr_eq + +/* + * t: the non-atomic type, like "uint32_t". + * ta: the short name for the type, like "u32". + * val[1,2,3]: Values of the given type. 
The CAS tests use val2 for expected, + * and val3 for desired. + */ + +#define DO_TESTS(t, ta, val1, val2, val3) do { \ + t val; \ + t expected; \ + bool success; \ + /* This (along with the load below) also tests ATOMIC_LOAD. */ \ + atomic_##ta##_t atom = ATOMIC_INIT(val1); \ + \ + /* ATOMIC_INIT and load. */ \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Load or init failed"); \ + \ + /* Store. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Store failed"); \ + \ + /* Exchange. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Exchange store invalid value"); \ + \ + /* \ + * Weak CAS. Spurious failures are allowed, so we loop a few \ + * times. \ + */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + success = false; \ + for (int i = 0; i < 10 && !success; i++) { \ + expected = val2; \ + success = atomic_compare_exchange_weak_##ta(&atom, \ + &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, expected, \ + "CAS should update expected"); \ + } \ + assert_b_eq(val1 == val2, success, \ + "Weak CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ + \ + /* Strong CAS. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + expected = val2; \ + success = atomic_compare_exchange_strong_##ta(&atom, &expected, \ + val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_b_eq(val1 == val2, success, \ + "Strong CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ + \ + \ +} while (0) + +#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \ + atomic_##ta##_t atom; \ + t val; \ + \ + /* Fetch-add. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-add should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 + val2, val, \ + "Fetch-add should update atomic"); \ + \ + /* Fetch-sub. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-sub should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 - val2, val, \ + "Fetch-sub should update atomic"); \ + \ + /* Fetch-and. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-and should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 & val2, val, \ + "Fetch-and should update atomic"); \ + \ + /* Fetch-or. 
*/ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-or should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 | val2, val, \ + "Fetch-or should update atomic"); \ + \ + /* Fetch-xor. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-xor should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 ^ val2, val, \ + "Fetch-xor should update atomic"); \ +} while (0) + +#define TEST_STRUCT(t, ta) \ +typedef struct { \ + t val1; \ + t val2; \ + t val3; \ +} ta##_test_t; + +#define TEST_CASES(t) { \ + {(t)-1, (t)-1, (t)-2}, \ + {(t)-1, (t) 0, (t)-2}, \ + {(t)-1, (t) 1, (t)-2}, \ + \ + {(t) 0, (t)-1, (t)-2}, \ + {(t) 0, (t) 0, (t)-2}, \ + {(t) 0, (t) 1, (t)-2}, \ + \ + {(t) 1, (t)-1, (t)-2}, \ + {(t) 1, (t) 0, (t)-2}, \ + {(t) 1, (t) 1, (t)-2}, \ + \ + {(t)0, (t)-(1 << 22), (t)-2}, \ + {(t)0, (t)(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)(1 << 22), (t)-2} \ +} + +#define TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ + } \ +} while (0) + +#define INTEGER_TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ + DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \ + } \ +} while (0) + +TEST_STRUCT(uint64_t, u64); +TEST_BEGIN(test_atomic_u64) { +#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) + test_skip("64-bit atomic operations not supported"); +#else + INTEGER_TEST_BODY(uint64_t, u64); +#endif +} +TEST_END + + +TEST_STRUCT(uint32_t, u32); +TEST_BEGIN(test_atomic_u32) { + INTEGER_TEST_BODY(uint32_t, u32); +} +TEST_END + +TEST_STRUCT(void *, p); +TEST_BEGIN(test_atomic_p) { + TEST_BODY(void *, p); +} +TEST_END + +TEST_STRUCT(size_t, zu); +TEST_BEGIN(test_atomic_zu) { + INTEGER_TEST_BODY(size_t, zu); +} +TEST_END + +TEST_STRUCT(ssize_t, zd); +TEST_BEGIN(test_atomic_zd) { + INTEGER_TEST_BODY(ssize_t, zd); +} +TEST_END + + +TEST_STRUCT(unsigned, u); +TEST_BEGIN(test_atomic_u) { + INTEGER_TEST_BODY(unsigned, u); +} +TEST_END + +int +main(void) { + return test( + test_atomic_u64, + test_atomic_u32, + test_atomic_p, + test_atomic_zu, + test_atomic_zd, + test_atomic_u); +} diff --git a/deps/jemalloc/test/unit/background_thread.c b/deps/jemalloc/test/unit/background_thread.c new file mode 100644 index 0000000..f7bd37c --- /dev/null +++ b/deps/jemalloc/test/unit/background_thread.c @@ -0,0 +1,119 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +static void +test_switch_background_thread_ctl(bool new_val) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = new_val; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, !e1, + "background_thread should be %d before.\n", !e1); + if (e1) { + assert_zu_gt(n_background_threads, 0, + "Number of background threads should be non zero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +static void 
+test_repeat_background_thread_ctl(bool before) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = before; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, before, + "background_thread should be %d.\n", before); + if (e1) { + assert_zu_gt(n_background_threads, 0, + "Number of background threads should be non zero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +TEST_BEGIN(test_background_thread_ctl) { + test_skip_if(!have_background_thread); + + bool e0, e1; + size_t sz = sizeof(bool); + + assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("background_thread", (void *)&e1, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, e1, + "Default and opt.background_thread does not match.\n"); + if (e0) { + test_switch_background_thread_ctl(false); + } + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be 0.\n"); + + for (unsigned i = 0; i < 4; i++) { + test_switch_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + + test_switch_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + } +} +TEST_END + +TEST_BEGIN(test_background_thread_running) { + test_skip_if(!have_background_thread); + test_skip_if(!config_stats); + +#if defined(JEMALLOC_BACKGROUND_THREAD) + tsd_t *tsd = tsd_fetch(); + background_thread_info_t *info = &background_thread_info[0]; + + test_repeat_background_thread_ctl(false); + test_switch_background_thread_ctl(true); + assert_b_eq(info->state, background_thread_started, + "Background_thread did not start.\n"); + + nstime_t start, now; + nstime_init(&start, 0); + nstime_update(&start); + + bool ran = false; + while (true) { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->tot_n_runs > 0) { + ran = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (ran) { + break; + } + + nstime_init(&now, 0); + nstime_update(&now); + nstime_subtract(&now, &start); + assert_u64_lt(nstime_sec(&now), 1000, + "Background threads did not run for 1000 seconds."); + sleep(1); + } + test_switch_background_thread_ctl(false); +#endif +} +TEST_END + +int +main(void) { + /* Background_thread creation tests reentrancy naturally. */ + return test_no_reentrancy( + test_background_thread_ctl, + test_background_thread_running); +} diff --git a/deps/jemalloc/test/unit/background_thread_enable.c b/deps/jemalloc/test/unit/background_thread_enable.c new file mode 100644 index 0000000..d894e93 --- /dev/null +++ b/deps/jemalloc/test/unit/background_thread_enable.c @@ -0,0 +1,85 @@ +#include "test/jemalloc_test.h" + +const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20"; + +TEST_BEGIN(test_deferred) { + test_skip_if(!have_background_thread); + + unsigned id; + size_t sz_u = sizeof(unsigned); + + /* + * 10 here is somewhat arbitrary, except insofar as we want to ensure + * that the number of background threads is smaller than the number of + * arenas. I'll ragequit long before we have to spin up 10 threads per + * cpu to handle background purging, so this is a conservative + * approximation. 
+ */ + for (unsigned i = 0; i < 10 * ncpus; i++) { + assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, + "Failed to create arena"); + } + + bool enable = true; + size_t sz_b = sizeof(bool); + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to enable background threads"); + enable = false; + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to disable background threads"); +} +TEST_END + +TEST_BEGIN(test_max_background_threads) { + test_skip_if(!have_background_thread); + + size_t max_n_thds; + size_t opt_max_n_thds; + size_t sz_m = sizeof(max_n_thds); + assert_d_eq(mallctl("opt.max_background_threads", + &opt_max_n_thds, &sz_m, NULL, 0), 0, + "Failed to get opt.max_background_threads"); + assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL, + 0), 0, "Failed to get max background threads"); + assert_zu_eq(opt_max_n_thds, max_n_thds, + "max_background_threads and " + "opt.max_background_threads should match"); + assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds, + sz_m), 0, "Failed to set max background threads"); + + unsigned id; + size_t sz_u = sizeof(unsigned); + + for (unsigned i = 0; i < 10 * ncpus; i++) { + assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, + "Failed to create arena"); + } + + bool enable = true; + size_t sz_b = sizeof(bool); + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to enable background threads"); + assert_zu_eq(n_background_threads, max_n_thds, + "Number of background threads should not change.\n"); + size_t new_max_thds = max_n_thds - 1; + if (new_max_thds > 0) { + assert_d_eq(mallctl("max_background_threads", NULL, NULL, + &new_max_thds, sz_m), 0, + "Failed to set max background threads"); + assert_zu_eq(n_background_threads, new_max_thds, + "Number of background threads should decrease by 1.\n"); + } + new_max_thds = 1; + assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds, + sz_m), 0, "Failed to set max background threads"); + assert_zu_eq(n_background_threads, new_max_thds, + "Number of background threads should be 1.\n"); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_deferred, + test_max_background_threads); +} diff --git a/deps/jemalloc/test/unit/base.c b/deps/jemalloc/test/unit/base.c new file mode 100644 index 0000000..6b792cf --- /dev/null +++ b/deps/jemalloc/test/unit/base.c @@ -0,0 +1,234 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static extent_hooks_t hooks_null = { + extent_alloc_hook, + NULL, /* dalloc */ + NULL, /* destroy */ + NULL, /* commit */ + NULL, /* decommit */ + NULL, /* purge_lazy */ + NULL, /* purge_forced */ + NULL, /* split */ + NULL /* merge */ +}; + +static extent_hooks_t hooks_not_null = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + NULL, /* commit */ + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + NULL, /* split */ + NULL /* merge */ +}; + +TEST_BEGIN(test_base_hooks_default) { + base_t *base; + size_t allocated0, allocated1, resident, mapped, n_thp; + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + if (opt_metadata_thp == metadata_thp_always) { + assert_zu_gt(n_thp, 0, + "Base should have 1 THP at 
least."); + } + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(tsdn, base); +} +TEST_END + +TEST_BEGIN(test_base_hooks_null) { + extent_hooks_t hooks_orig; + base_t *base; + size_t allocated0, allocated1, resident, mapped, n_thp; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t)); + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + if (opt_metadata_thp == metadata_thp_always) { + assert_zu_gt(n_thp, 0, + "Base should have 1 THP at least."); + } + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(tsdn, base); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +TEST_BEGIN(test_base_hooks_not_null) { + extent_hooks_t hooks_orig; + base_t *base; + void *p, *q, *r, *r_exp; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + did_alloc = false; + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + assert_true(did_alloc, "Expected alloc"); + + /* + * Check for tight packing at specified alignment under simple + * conditions. + */ + { + const size_t alignments[] = { + 1, + QUANTUM, + QUANTUM << 1, + CACHELINE, + CACHELINE << 1, + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + size_t align_ceil = ALIGNMENT_CEILING(alignment, + QUANTUM); + p = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected quantum alignment"); + q = base_alloc(tsdn, base, alignment, alignment); + assert_ptr_not_null(q, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q, + "Minimal allocation should take up %zu bytes", + align_ceil); + r = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(r, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r, + "Minimal allocation should take up %zu bytes", + align_ceil); + } + } + + /* + * Allocate an object that cannot fit in the first block, then verify + * that the first block's remaining space is considered for subsequent + * allocation. + */ + assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM, + "Remainder insufficient for test"); + /* Use up all but one quantum of block. 
*/ + while (extent_bsize_get(&base->blocks->extent) > QUANTUM) { + p = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(p, "Unexpected base_alloc() failure"); + } + r_exp = extent_addr_get(&base->blocks->extent); + assert_zu_eq(base->extent_sn_next, 1, "One extant block expected"); + q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM); + assert_ptr_not_null(q, "Unexpected base_alloc() failure"); + assert_ptr_ne(q, r_exp, "Expected allocation from new block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + r = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(r, "Unexpected base_alloc() failure"); + assert_ptr_eq(r, r_exp, "Expected allocation from first block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + + /* + * Check for proper alignment support when normal blocks are too small. + */ + { + const size_t alignments[] = { + HUGEPAGE, + HUGEPAGE << 1 + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + p = base_alloc(tsdn, base, QUANTUM, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected %zu-byte alignment", + alignment); + } + } + + called_dalloc = called_destroy = called_decommit = called_purge_lazy = + called_purge_forced = false; + base_delete(tsdn, base); + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(!called_destroy, "Unexpected destroy call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(called_purge_lazy, "Expected purge_lazy call"); + assert_true(called_purge_forced, "Expected purge_forced call"); + + try_dalloc = true; + try_destroy = true; + try_decommit = true; + try_purge_lazy = true; + try_purge_forced = true; + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_base_hooks_default, + test_base_hooks_null, + test_base_hooks_not_null); +} diff --git a/deps/jemalloc/test/unit/binshard.c b/deps/jemalloc/test/unit/binshard.c new file mode 100644 index 0000000..d7a8df8 --- /dev/null +++ b/deps/jemalloc/test/unit/binshard.c @@ -0,0 +1,154 @@ +#include "test/jemalloc_test.h" + +/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */ + +#define NTHREADS 16 +#define REMOTE_NALLOC 256 + +static void * +thd_producer(void *varg) { + void **mem = varg; + unsigned arena, i; + size_t sz; + + sz = sizeof(arena); + /* Remote arena. */ + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + for (i = 0; i < REMOTE_NALLOC / 2; i++) { + mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena)); + } + + /* Remote bin. */ + for (; i < REMOTE_NALLOC; i++) { + mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0)); + } + + return NULL; +} + +TEST_BEGIN(test_producer_consumer) { + thd_t thds[NTHREADS]; + void *mem[NTHREADS][REMOTE_NALLOC]; + unsigned i; + + /* Create producer threads to allocate. */ + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_producer, mem[i]); + } + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } + /* Remote deallocation by the current thread. 
*/ + for (i = 0; i < NTHREADS; i++) { + for (unsigned j = 0; j < REMOTE_NALLOC; j++) { + assert_ptr_not_null(mem[i][j], + "Unexpected remote allocation failure"); + dallocx(mem[i][j], 0); + } + } +} +TEST_END + +static void * +thd_start(void *varg) { + void *ptr, *ptr2; + extent_t *extent; + unsigned shard1, shard2; + + tsdn_t *tsdn = tsdn_fetch(); + /* Try triggering allocations from sharded bins. */ + for (unsigned i = 0; i < 1024; i++) { + ptr = mallocx(1, MALLOCX_TCACHE_NONE); + ptr2 = mallocx(129, MALLOCX_TCACHE_NONE); + + extent = iealloc(tsdn, ptr); + shard1 = extent_binshard_get(extent); + dallocx(ptr, 0); + assert_u_lt(shard1, 16, "Unexpected bin shard used"); + + extent = iealloc(tsdn, ptr2); + shard2 = extent_binshard_get(extent); + dallocx(ptr2, 0); + assert_u_lt(shard2, 4, "Unexpected bin shard used"); + + if (shard1 > 0 || shard2 > 0) { + /* Triggered sharded bin usage. */ + return (void *)(uintptr_t)shard1; + } + } + + return NULL; +} + +TEST_BEGIN(test_bin_shard_mt) { + test_skip_if(have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)); + + thd_t thds[NTHREADS]; + unsigned i; + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, NULL); + } + bool sharded = false; + for (i = 0; i < NTHREADS; i++) { + void *ret; + thd_join(thds[i], &ret); + if (ret != NULL) { + sharded = true; + } + } + assert_b_eq(sharded, true, "Did not find sharded bins"); +} +TEST_END + +TEST_BEGIN(test_bin_shard) { + unsigned nbins, i; + size_t mib[4], mib2[4]; + size_t miblen, miblen2, len; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + miblen2 = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0, + "Unexpected mallctlnametomib() failure"); + + for (i = 0; i < nbins; i++) { + uint32_t nshards; + size_t size, sz1, sz2; + + mib[2] = i; + sz1 = sizeof(nshards); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1, + NULL, 0), 0, "Unexpected mallctlbymib() failure"); + + mib2[2] = i; + sz2 = sizeof(size); + assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2, + NULL, 0), 0, "Unexpected mallctlbymib() failure"); + + if (size >= 1 && size <= 128) { + assert_u_eq(nshards, 16, "Unexpected nshards"); + } else if (size == 256) { + assert_u_eq(nshards, 8, "Unexpected nshards"); + } else if (size > 128 && size <= 512) { + assert_u_eq(nshards, 4, "Unexpected nshards"); + } else { + assert_u_eq(nshards, 1, "Unexpected nshards"); + } + } +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_bin_shard, + test_bin_shard_mt, + test_producer_consumer); +} diff --git a/deps/jemalloc/test/unit/binshard.sh b/deps/jemalloc/test/unit/binshard.sh new file mode 100644 index 0000000..c1d58c8 --- /dev/null +++ b/deps/jemalloc/test/unit/binshard.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" diff --git a/deps/jemalloc/test/unit/bit_util.c b/deps/jemalloc/test/unit/bit_util.c new file mode 100644 index 0000000..b747deb --- /dev/null +++ b/deps/jemalloc/test/unit/bit_util.c @@ -0,0 +1,111 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/bit_util.h" + +#define TEST_POW2_CEIL(t, suf, pri) do { \ + unsigned i, pow2; \ + t x; \ + \ + assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ + \ + for (i = 0; i < sizeof(t) * 8; i++) { \ + 
assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ + << i, "Unexpected result"); \ + } \ + \ + for (i = 2; i < sizeof(t) * 8; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ + ((t)1) << i, "Unexpected result"); \ + } \ + \ + for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ + ((t)1) << (i+1), "Unexpected result"); \ + } \ + \ + for (pow2 = 1; pow2 < 25; pow2++) { \ + for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ + x++) { \ + assert_##suf##_eq(pow2_ceil_##suf(x), \ + ((t)1) << pow2, \ + "Unexpected result, x=%"pri, x); \ + } \ + } \ +} while (0) + +TEST_BEGIN(test_pow2_ceil_u64) { + TEST_POW2_CEIL(uint64_t, u64, FMTu64); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_u32) { + TEST_POW2_CEIL(uint32_t, u32, FMTu32); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_zu) { + TEST_POW2_CEIL(size_t, zu, "zu"); +} +TEST_END + +void +assert_lg_ceil_range(size_t input, unsigned answer) { + if (input == 1) { + assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer); + return; + } + assert_zu_le(input, (ZU(1) << answer), + "Got %u as lg_ceil of %zu", answer, input); + assert_zu_gt(input, (ZU(1) << (answer - 1)), + "Got %u as lg_ceil of %zu", answer, input); +} + +void +assert_lg_floor_range(size_t input, unsigned answer) { + if (input == 1) { + assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer); + return; + } + assert_zu_ge(input, (ZU(1) << answer), + "Got %u as lg_floor of %zu", answer, input); + assert_zu_lt(input, (ZU(1) << (answer + 1)), + "Got %u as lg_floor of %zu", answer, input); +} + +TEST_BEGIN(test_lg_ceil_floor) { + for (size_t i = 1; i < 10 * 1000 * 1000; i++) { + assert_lg_ceil_range(i, lg_ceil(i)); + assert_lg_ceil_range(i, LG_CEIL(i)); + assert_lg_floor_range(i, lg_floor(i)); + assert_lg_floor_range(i, LG_FLOOR(i)); + } + for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) { + for (size_t j = 0; j < (1 << 4); j++) { + size_t num1 = ((size_t)1 << i) + - j * ((size_t)1 << (i - 4)); + size_t num2 = ((size_t)1 << i) + + j * ((size_t)1 << (i - 4)); + assert_zu_ne(num1, 0, "Invalid lg argument"); + assert_zu_ne(num2, 0, "Invalid lg argument"); + assert_lg_ceil_range(num1, lg_ceil(num1)); + assert_lg_ceil_range(num1, LG_CEIL(num1)); + assert_lg_ceil_range(num2, lg_ceil(num2)); + assert_lg_ceil_range(num2, LG_CEIL(num2)); + + assert_lg_floor_range(num1, lg_floor(num1)); + assert_lg_floor_range(num1, LG_FLOOR(num1)); + assert_lg_floor_range(num2, lg_floor(num2)); + assert_lg_floor_range(num2, LG_FLOOR(num2)); + } + } +} +TEST_END + +int +main(void) { + return test( + test_pow2_ceil_u64, + test_pow2_ceil_u32, + test_pow2_ceil_zu, + test_lg_ceil_floor); +} diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c new file mode 100644 index 0000000..cafb203 --- /dev/null +++ b/deps/jemalloc/test/unit/bitmap.c @@ -0,0 +1,431 @@ +#include "test/jemalloc_test.h" + +#define NBITS_TAB \ + NB( 1) \ + NB( 2) \ + NB( 3) \ + NB( 4) \ + NB( 5) \ + NB( 6) \ + NB( 7) \ + NB( 8) \ + NB( 9) \ + NB(10) \ + NB(11) \ + NB(12) \ + NB(13) \ + NB(14) \ + NB(15) \ + NB(16) \ + NB(17) \ + NB(18) \ + NB(19) \ + NB(20) \ + NB(21) \ + NB(22) \ + NB(23) \ + NB(24) \ + NB(25) \ + NB(26) \ + NB(27) \ + NB(28) \ + NB(29) \ + NB(30) \ + NB(31) \ + NB(32) \ + \ + NB(33) \ + NB(34) \ + NB(35) \ + NB(36) \ + NB(37) \ + NB(38) \ + NB(39) \ + NB(40) \ + NB(41) \ + NB(42) \ + NB(43) \ + NB(44) \ + NB(45) \ + NB(46) \ + NB(47) \ + NB(48) \ + NB(49) \ + NB(50) \ + NB(51) \ + NB(52) \ + NB(53) \ + NB(54) \ + NB(55) \ + NB(56) 
\ + NB(57) \ + NB(58) \ + NB(59) \ + NB(60) \ + NB(61) \ + NB(62) \ + NB(63) \ + NB(64) \ + NB(65) \ + \ + NB(126) \ + NB(127) \ + NB(128) \ + NB(129) \ + NB(130) \ + \ + NB(254) \ + NB(255) \ + NB(256) \ + NB(257) \ + NB(258) \ + \ + NB(510) \ + NB(511) \ + NB(512) \ + NB(513) \ + NB(514) \ + \ + NB(1024) \ + NB(2048) \ + NB(4096) \ + NB(8192) \ + NB(16384) \ + +static void +test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { + bitmap_info_t binfo_dyn; + bitmap_info_init(&binfo_dyn, nbits); + + assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + assert_zu_eq(binfo->nbits, binfo_dyn.nbits, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); +#ifdef BITMAP_USE_TREE + assert_u_eq(binfo->nlevels, binfo_dyn.nlevels, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + { + unsigned i; + + for (i = 0; i < binfo->nlevels; i++) { + assert_zu_eq(binfo->levels[i].group_offset, + binfo_dyn.levels[i].group_offset, + "Unexpected difference between static and dynamic " + "initialization, nbits=%zu, level=%u", nbits, i); + } + } +#else + assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups, + "Unexpected difference between static and dynamic initialization"); +#endif +} + +TEST_BEGIN(test_bitmap_initializer) { +#define NB(nbits) { \ + if (nbits <= BITMAP_MAXBITS) { \ + bitmap_info_t binfo = \ + BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_initializer_body(&binfo, nbits); \ + } \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static size_t +test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, + size_t prev_size) { + size_t size = bitmap_size(binfo); + assert_zu_ge(size, (nbits >> 3), + "Bitmap size is smaller than expected"); + assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); + return size; +} + +TEST_BEGIN(test_bitmap_size) { + size_t nbits, prev_size; + + prev_size = 0; + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + prev_size = test_bitmap_size_body(&binfo, nbits, \ + prev_size); \ + } + prev_size = 0; + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + + bitmap_init(bitmap, binfo, false); + for (i = 0; i < nbits; i++) { + assert_false(bitmap_get(bitmap, binfo, i), + "Bit should be unset"); + } + + bitmap_init(bitmap, binfo, true); + for (i = 0; i < nbits; i++) { + assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set"); + } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_init) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_init_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_init_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + 
+ for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} + +TEST_BEGIN(test_bitmap_set) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_set_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_set_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + for (i = 0; i < nbits; i++) { + bitmap_unset(bitmap, binfo, i); + } + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} + +TEST_BEGIN(test_bitmap_unset) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_unset_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_unset_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + /* Iteratively set bits starting at the beginning. */ + for (size_t i = 0; i < nbits; i++) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be just after previous first unset " + "bit"); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + + /* + * Iteratively unset bits starting at the end, and verify that + * bitmap_sfu() reaches the unset bits. + */ + for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */ + bitmap_unset(bitmap, binfo, i); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should the bit previously unset"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should the bit previously unset"); + bitmap_unset(bitmap, binfo, i); + } + assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset"); + + /* + * Iteratively set bits starting at the beginning, and verify that + * bitmap_sfu() looks past them. + */ + for (size_t i = 1; i < nbits; i++) { + bitmap_set(bitmap, binfo, i - 1); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? 
i-1 : i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be just after the bit previously " + "set"); + bitmap_unset(bitmap, binfo, i); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), + nbits - 1, "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + + /* + * Bubble a "usu" pattern through the bitmap and verify that + * bitmap_ffu() finds the correct bit for all five min_bit cases. + */ + if (nbits >= 3) { + for (size_t i = 0; i < nbits-2; i++) { + bitmap_unset(bitmap, binfo, i); + bitmap_unset(bitmap, binfo, i+2); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, + "Unexpected first unset bit"); + if (i + 3 < nbits) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3), + nbits, "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2, + "Unexpected first unset bit"); + } + } + + /* + * Unset the last bit, bubble another unset bit through the bitmap, and + * verify that bitmap_ffu() finds the correct bit for all four min_bit + * cases. 
+ */ + if (nbits >= 3) { + bitmap_unset(bitmap, binfo, nbits-1); + for (size_t i = 0; i < nbits-1; i++) { + bitmap_unset(bitmap, binfo, i); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), + nbits-1, "Unexpected first unset bit"); + + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, + "Unexpected first unset bit"); + } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_xfu) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_xfu_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_xfu_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +int +main(void) { + return test( + test_bitmap_initializer, + test_bitmap_size, + test_bitmap_init, + test_bitmap_set, + test_bitmap_unset, + test_bitmap_xfu); +} diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c new file mode 100644 index 0000000..707ea5f --- /dev/null +++ b/deps/jemalloc/test/unit/ckh.c @@ -0,0 +1,211 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_new_delete) { + tsd_t *tsd; + ckh_t ckh; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); + + assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_count_insert_search_remove) { + tsd_t *tsd; + ckh_t ckh; + const char *strs[] = { + "a string", + "A string", + "a string.", + "A string." + }; + const char *missing = "A string not in the hash table."; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); + + /* Insert. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + ckh_insert(tsd, &ckh, strs[i], strs[i]); + assert_zu_eq(ckh_count(&ckh), i+1, + "ckh_count() should return %zu, but it returned %zu", i+1, + ckh_count(&ckh)); + } + + /* Search. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_search(&ckh, strs[i], kp, vp), + "Unexpected ckh_search() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + } + assert_true(ckh_search(&ckh, missing, NULL, NULL), + "Unexpected ckh_search() success"); + + /* Remove. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? 
&v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), + "Unexpected ckh_remove() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + assert_zu_eq(ckh_count(&ckh), + sizeof(strs)/sizeof(const char *) - i - 1, + "ckh_count() should return %zu, but it returned %zu", + sizeof(strs)/sizeof(const char *) - i - 1, + ckh_count(&ckh)); + } + + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_insert_iter_remove) { +#define NITEMS ZU(1000) + tsd_t *tsd; + ckh_t ckh; + void **p[NITEMS]; + void *q, *r; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + + for (i = 0; i < NITEMS; i++) { + p[i] = mallocx(i+1, 0); + assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NITEMS; i++) { + size_t j; + + for (j = i; j < NITEMS; j++) { + assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), + "Unexpected ckh_insert() failure"); + assert_false(ckh_search(&ckh, p[j], &q, &r), + "Unexpected ckh_search() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + } + + assert_zu_eq(ckh_count(&ckh), NITEMS, + "ckh_count() should return %zu, but it returned %zu", + NITEMS, ckh_count(&ckh)); + + for (j = i + 1; j < NITEMS; j++) { + assert_false(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() success"); + } + + { + bool seen[NITEMS]; + size_t tabind; + + memset(seen, 0, sizeof(seen)); + + for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { + size_t k; + + assert_ptr_eq(q, r, "Key and val not equal"); + + for (k = 0; k < NITEMS; k++) { + if (p[k] == q) { + assert_false(seen[k], + "Item %zu already seen", k); + seen[k] = true; + break; + } + } + } + + for (j = 0; j < i + 1; j++) { + assert_true(seen[j], "Item %zu not seen", j); + } + for (; j < NITEMS; j++) { + assert_false(seen[j], "Item %zu seen", j); + } + } + } + + for (i = 0; i < NITEMS; i++) { + assert_false(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[i], q, "Key pointer mismatch"); + assert_ptr_eq(p[i], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() success"); + dallocx(p[i], 0); + } + + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", + ZU(0), ckh_count(&ckh)); + ckh_delete(tsd, &ckh); +#undef NITEMS +} +TEST_END + +int +main(void) { + return test( + test_new_delete, + test_count_insert_search_remove, + test_insert_iter_remove); +} diff --git a/deps/jemalloc/test/unit/decay.c b/deps/jemalloc/test/unit/decay.c new file mode 100644 index 0000000..cf3c079 --- /dev/null +++ b/deps/jemalloc/test/unit/decay.c @@ -0,0 +1,605 @@ +#include 
"test/jemalloc_test.h" + +#include "jemalloc/internal/ticker.h" + +static nstime_monotonic_t *nstime_monotonic_orig; +static nstime_update_t *nstime_update_orig; + +static unsigned nupdates_mock; +static nstime_t time_mock; +static bool monotonic_mock; + +static bool +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; +} + +static bool +nstime_monotonic_mock(void) { + return monotonic_mock; +} + +static bool +nstime_update_mock(nstime_t *time) { + nupdates_mock++; + if (monotonic_mock) { + nstime_copy(time, &time_mock); + } + return !monotonic_mock; +} + +static unsigned +do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +void +do_epoch(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); +} + +void +do_purge(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +void +do_decay(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static uint64_t +get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + uint64_t npurge = 0; + size_t sz = sizeof(npurge); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), + config_stats ? 
0 : ENOENT, "Unexpected mallctlbymib() failure"); + return npurge; +} + +static uint64_t +get_arena_dirty_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); +} + +static uint64_t +get_arena_dirty_purged(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind); +} + +static uint64_t +get_arena_muzzy_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static uint64_t +get_arena_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static size_t +get_arena_pdirty(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pdirty; + size_t sz = sizeof(pdirty); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pdirty; +} + +static size_t +get_arena_pmuzzy(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pmuzzy; + size_t sz = sizeof(pmuzzy); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pmuzzy; +} + +static void * +do_mallocx(size_t size, int flags) { + void *p = mallocx(size, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + return p; +} + +static void +generate_dirty(unsigned arena_ind, size_t size) { + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + void *p = do_mallocx(size, flags); + dallocx(p, flags); +} + +TEST_BEGIN(test_decay_ticks) { + test_skip_if(check_background_thread_enabled()); + + ticker_t *decay_ticker; + unsigned tick0, tick1, arena_ind; + size_t sz, large0; + void *p; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + /* Set up a manually managed arena for test. */ + arena_ind = do_arena_create(0, 0); + + /* Migrate to the new arena, and get the ticker. */ + unsigned old_arena_ind; + size_t sz_arena_ind = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, + &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind); + assert_ptr_not_null(decay_ticker, + "Unexpected failure getting decay ticker"); + + /* + * Test the standard APIs using a large size class, since we can't + * control tcache interactions for small size classes (except by + * completely disabling tcache for the entire test program). + */ + + /* malloc(). */ + tick0 = ticker_read(decay_ticker); + p = malloc(large0); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); + /* free(). */ + tick0 = ticker_read(decay_ticker); + free(p); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); + + /* calloc(). 
*/ + tick0 = ticker_read(decay_ticker); + p = calloc(1, large0); + assert_ptr_not_null(p, "Unexpected calloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); + free(p); + + /* posix_memalign(). */ + tick0 = ticker_read(decay_ticker); + assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, + "Unexpected posix_memalign() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during posix_memalign()"); + free(p); + + /* aligned_alloc(). */ + tick0 = ticker_read(decay_ticker); + p = aligned_alloc(sizeof(size_t), large0); + assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during aligned_alloc()"); + free(p); + + /* realloc(). */ + /* Allocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(NULL, large0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Reallocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(p, large0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Deallocate. */ + tick0 = ticker_read(decay_ticker); + realloc(p, 0); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + + /* + * Test the *allocx() APIs using large and small size classes, with + * tcache explicitly disabled. + */ + { + unsigned i; + size_t allocx_sizes[2]; + allocx_sizes[0] = large0; + allocx_sizes[1] = 1; + + for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { + sz = allocx_sizes[i]; + + /* mallocx(). */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during mallocx() (sz=%zu)", + sz); + /* rallocx(). */ + tick0 = ticker_read(decay_ticker); + p = rallocx(p, sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during rallocx() (sz=%zu)", + sz); + /* xallocx(). */ + tick0 = ticker_read(decay_ticker); + xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during xallocx() (sz=%zu)", + sz); + /* dallocx(). */ + tick0 = ticker_read(decay_ticker); + dallocx(p, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during dallocx() (sz=%zu)", + sz); + /* sdallocx(). */ + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick0 = ticker_read(decay_ticker); + sdallocx(p, sz, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during sdallocx() " + "(sz=%zu)", sz); + } + } + + /* + * Test tcache fill/flush interactions for large and small size classes, + * using an explicit tcache. 
+ */ + unsigned tcache_ind, i; + size_t tcache_sizes[2]; + tcache_sizes[0] = large0; + tcache_sizes[1] = 1; + + size_t tcache_max, sz_tcache_max; + sz_tcache_max = sizeof(tcache_max); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, + &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { + sz = tcache_sizes[i]; + + /* tcache fill. */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache fill " + "(sz=%zu)", sz); + /* tcache flush. */ + dallocx(p, MALLOCX_TCACHE(tcache_ind)); + tick0 = ticker_read(decay_ticker); + assert_d_eq(mallctl("tcache.flush", NULL, NULL, + (void *)&tcache_ind, sizeof(unsigned)), 0, + "Unexpected mallctl failure"); + tick1 = ticker_read(decay_ticker); + + /* Will only tick if it's in tcache. */ + if (sz <= tcache_max) { + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache " + "flush (sz=%zu)", sz); + } else { + assert_u32_eq(tick1, tick0, + "Unexpected ticker tick during tcache " + "flush (sz=%zu)", sz); + } + } +} +TEST_END + +static void +decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, + uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) { +#define NINTERVALS 101 + nstime_t time, update_interval, decay_ms, deadline; + + nstime_init(&time, 0); + nstime_update(&time); + + nstime_init2(&decay_ms, dt, 0); + nstime_copy(&deadline, &time); + nstime_add(&deadline, &decay_ms); + + nstime_init2(&update_interval, dt, 0); + nstime_idivide(&update_interval, NINTERVALS); + + /* + * Keep q's slab from being deallocated during the looping below. If a + * cached slab were to repeatedly come and go during looping, it could + * prevent the decay backlog ever becoming empty. + */ + void *p = do_mallocx(1, flags); + uint64_t dirty_npurge1, muzzy_npurge1; + do { + for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; + i++) { + void *q = do_mallocx(1, flags); + dallocx(q, flags); + } + dirty_npurge1 = get_arena_dirty_npurge(arena_ind); + muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind); + + nstime_add(&time_mock, &update_interval); + nstime_update(&time); + } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == + dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || + !terminate_asap)); + dallocx(p, flags); + + if (config_stats) { + assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + + muzzy_npurge0, "Expected purging to occur"); + } +#undef NINTERVALS +} + +TEST_BEGIN(test_decay_ticker) { + test_skip_if(check_background_thread_enabled()); +#define NPS 2048 + ssize_t ddt = opt_dirty_decay_ms; + ssize_t mdt = opt_muzzy_decay_ms; + unsigned arena_ind = do_arena_create(ddt, mdt); + int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + size_t large; + + /* + * Allocate a bunch of large objects, pause the clock, deallocate every + * other object (to fragment virtual memory), restore the clock, then + * [md]allocx() in a tight loop while advancing time rapidly to verify + * the ticker triggers purging. 
+ */ + + size_t tcache_max; + size_t sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + large = nallocx(tcache_max + 1, flags); + + do_purge(arena_ind); + uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); + uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind); + + for (unsigned i = 0; i < NPS; i++) { + ps[i] = do_mallocx(large, flags); + } + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = true; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (unsigned i = 0; i < NPS; i += 2) { + dallocx(ps[i], flags); + unsigned nupdates0 = nupdates_mock; + do_decay(arena_ind); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, + muzzy_npurge0, true); + decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, + muzzy_npurge0, false); + + do_arena_destroy(arena_ind); + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; +#undef NPS +} +TEST_END + +TEST_BEGIN(test_decay_nonmonotonic) { + test_skip_if(check_background_thread_enabled()); +#define NPS (SMOOTHSTEP_NSTEPS + 1) + int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + uint64_t npurge0 = 0; + uint64_t npurge1 = 0; + size_t sz, large0; + unsigned i, nupdates0; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure"); + do_epoch(); + sz = sizeof(uint64_t); + npurge0 = get_arena_npurge(0); + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = false; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (i = 0; i < NPS; i++) { + ps[i] = mallocx(large0, flags); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NPS; i++) { + dallocx(ps[i], flags); + nupdates0 = nupdates_mock; + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.decay failure"); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + do_epoch(); + sz = sizeof(uint64_t); + npurge1 = get_arena_npurge(0); + + if (config_stats) { + assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); + } + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; +#undef NPS +} +TEST_END + +TEST_BEGIN(test_decay_now) { + test_skip_if(check_background_thread_enabled()); + + unsigned arena_ind = do_arena_create(0, 0); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + /* Verify that dirty/muzzy pages never linger after deallocation. 
*/ + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + size_t size = sizes[i]; + generate_dirty(arena_ind, size); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, + "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, + "Unexpected muzzy pages"); + } + do_arena_destroy(arena_ind); +} +TEST_END + +TEST_BEGIN(test_decay_never) { + test_skip_if(check_background_thread_enabled() || !config_stats); + + unsigned arena_ind = do_arena_create(-1, -1); + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + void *ptrs[sizeof(sizes)/sizeof(size_t)]; + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + ptrs[i] = do_mallocx(sizes[i], flags); + } + /* Verify that each deallocation generates additional dirty pages. */ + size_t pdirty_prev = get_arena_pdirty(arena_ind); + size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind); + assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); + assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + dallocx(ptrs[i], flags); + size_t pdirty = get_arena_pdirty(arena_ind); + size_t pmuzzy = get_arena_pmuzzy(arena_ind); + assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind), + pdirty_prev, "Expected dirty pages to increase."); + assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages"); + pdirty_prev = pdirty; + } + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_decay_ticks, + test_decay_ticker, + test_decay_nonmonotonic, + test_decay_now, + test_decay_never); +} diff --git a/deps/jemalloc/test/unit/decay.sh b/deps/jemalloc/test/unit/decay.sh new file mode 100644 index 0000000..45aeccf --- /dev/null +++ b/deps/jemalloc/test/unit/decay.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" diff --git a/deps/jemalloc/test/unit/div.c b/deps/jemalloc/test/unit/div.c new file mode 100644 index 0000000..b47f10b --- /dev/null +++ b/deps/jemalloc/test/unit/div.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/div.h" + +TEST_BEGIN(test_div_exhaustive) { + for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) { + div_info_t div_info; + div_init(&div_info, divisor); + size_t max = 1000 * divisor; + if (max < 1000 * 1000) { + max = 1000 * 1000; + } + for (size_t dividend = 0; dividend < max; + dividend += divisor) { + size_t quotient = div_compute( + &div_info, dividend); + assert_zu_eq(dividend, quotient * divisor, + "With divisor = %zu, dividend = %zu, " + "got quotient %zu", divisor, dividend, quotient); + } + } +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_div_exhaustive); +} diff --git a/deps/jemalloc/test/unit/emitter.c b/deps/jemalloc/test/unit/emitter.c new file mode 100644 index 0000000..b4a693f --- /dev/null +++ b/deps/jemalloc/test/unit/emitter.c @@ -0,0 +1,469 @@ +#include "test/jemalloc_test.h" +#include "jemalloc/internal/emitter.h" + +/* + * This is so useful for debugging and feature work, we'll leave printing + * functionality committed but disabled by default. + */ +/* Print the text as it will appear. */ +static bool print_raw = false; +/* Print the text escaped, so it can be copied back into the test case. 
*/ +static bool print_escaped = false; + +typedef struct buf_descriptor_s buf_descriptor_t; +struct buf_descriptor_s { + char *buf; + size_t len; + bool mid_quote; +}; + +/* + * Forwards all writes to the passed-in buf_v (which should be cast from a + * buf_descriptor_t *). + */ +static void +forwarding_cb(void *buf_descriptor_v, const char *str) { + buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v; + + if (print_raw) { + malloc_printf("%s", str); + } + if (print_escaped) { + const char *it = str; + while (*it != '\0') { + if (!buf_descriptor->mid_quote) { + malloc_printf("\""); + buf_descriptor->mid_quote = true; + } + switch (*it) { + case '\\': + malloc_printf("\\"); + break; + case '\"': + malloc_printf("\\\""); + break; + case '\t': + malloc_printf("\\t"); + break; + case '\n': + malloc_printf("\\n\"\n"); + buf_descriptor->mid_quote = false; + break; + default: + malloc_printf("%c", *it); + } + it++; + } + } + + size_t written = malloc_snprintf(buf_descriptor->buf, + buf_descriptor->len, "%s", str); + assert_zu_eq(written, strlen(str), "Buffer overflow!"); + buf_descriptor->buf += written; + buf_descriptor->len -= written; + assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!"); +} + +static void +assert_emit_output(void (*emit_fn)(emitter_t *), + const char *expected_json_output, const char *expected_table_output) { + emitter_t emitter; + char buf[MALLOC_PRINTF_BUFSIZE]; + buf_descriptor_t buf_descriptor; + + buf_descriptor.buf = buf; + buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; + buf_descriptor.mid_quote = false; + + emitter_init(&emitter, emitter_output_json, &forwarding_cb, + &buf_descriptor); + (*emit_fn)(&emitter); + assert_str_eq(expected_json_output, buf, "json output failure"); + + buf_descriptor.buf = buf; + buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; + buf_descriptor.mid_quote = false; + + emitter_init(&emitter, emitter_output_table, &forwarding_cb, + &buf_descriptor); + (*emit_fn)(&emitter); + assert_str_eq(expected_table_output, buf, "table output failure"); +} + +static void +emit_dict(emitter_t *emitter) { + bool b_false = false; + bool b_true = true; + int i_123 = 123; + const char *str = "a string"; + + emitter_begin(emitter); + emitter_dict_begin(emitter, "foo", "This is the foo table:"); + emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false); + emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true); + emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123, + "note_key1", emitter_type_string, &str); + emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str, + "note_key2", emitter_type_bool, &b_false); + emitter_dict_end(emitter); + emitter_end(emitter); +} +static const char *dict_json = +"{\n" +"\t\"foo\": {\n" +"\t\t\"abc\": false,\n" +"\t\t\"def\": true,\n" +"\t\t\"ghi\": 123,\n" +"\t\t\"jkl\": \"a string\"\n" +"\t}\n" +"}\n"; +static const char *dict_table = +"This is the foo table:\n" +" ABC: false\n" +" DEF: true\n" +" GHI: 123 (note_key1: \"a string\")\n" +" JKL: \"a string\" (note_key2: false)\n"; + +TEST_BEGIN(test_dict) { + assert_emit_output(&emit_dict, dict_json, dict_table); +} +TEST_END + +static void +emit_table_printf(emitter_t *emitter) { + emitter_begin(emitter); + emitter_table_printf(emitter, "Table note 1\n"); + emitter_table_printf(emitter, "Table note 2 %s\n", + "with format string"); + emitter_end(emitter); +} + +static const char *table_printf_json = +"{\n" +"}\n"; + +static const char *table_printf_table = +"Table note 1\n" +"Table note 2 with format string\n"; + 
+TEST_BEGIN(test_table_printf) { + assert_emit_output(&emit_table_printf, table_printf_json, + table_printf_table); +} +TEST_END + +static void emit_nested_dict(emitter_t *emitter) { + int val = 123; + emitter_begin(emitter); + emitter_dict_begin(emitter, "json1", "Dict 1"); + emitter_dict_begin(emitter, "json2", "Dict 2"); + emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val); + emitter_dict_end(emitter); /* Close 2 */ + emitter_dict_begin(emitter, "json3", "Dict 3"); + emitter_dict_end(emitter); /* Close 3 */ + emitter_dict_end(emitter); /* Close 1 */ + emitter_dict_begin(emitter, "json4", "Dict 4"); + emitter_kv(emitter, "primitive", "Another primitive", + emitter_type_int, &val); + emitter_dict_end(emitter); /* Close 4 */ + emitter_end(emitter); +} + +static const char *nested_object_json = +"{\n" +"\t\"json1\": {\n" +"\t\t\"json2\": {\n" +"\t\t\t\"primitive\": 123\n" +"\t\t},\n" +"\t\t\"json3\": {\n" +"\t\t}\n" +"\t},\n" +"\t\"json4\": {\n" +"\t\t\"primitive\": 123\n" +"\t}\n" +"}\n"; + +static const char *nested_object_table = +"Dict 1\n" +" Dict 2\n" +" A primitive: 123\n" +" Dict 3\n" +"Dict 4\n" +" Another primitive: 123\n"; + +TEST_BEGIN(test_nested_dict) { + assert_emit_output(&emit_nested_dict, nested_object_json, + nested_object_table); +} +TEST_END + +static void +emit_types(emitter_t *emitter) { + bool b = false; + int i = -123; + unsigned u = 123; + ssize_t zd = -456; + size_t zu = 456; + const char *str = "string"; + uint32_t u32 = 789; + uint64_t u64 = 10000000000ULL; + + emitter_begin(emitter); + emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b); + emitter_kv(emitter, "k2", "K2", emitter_type_int, &i); + emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u); + emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd); + emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu); + emitter_kv(emitter, "k6", "K6", emitter_type_string, &str); + emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32); + emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64); + /* + * We don't test the title type, since it's only used for tables. It's + * tested in the emitter_table_row tests. 
+ */ + emitter_end(emitter); +} + +static const char *types_json = +"{\n" +"\t\"k1\": false,\n" +"\t\"k2\": -123,\n" +"\t\"k3\": 123,\n" +"\t\"k4\": -456,\n" +"\t\"k5\": 456,\n" +"\t\"k6\": \"string\",\n" +"\t\"k7\": 789,\n" +"\t\"k8\": 10000000000\n" +"}\n"; + +static const char *types_table = +"K1: false\n" +"K2: -123\n" +"K3: 123\n" +"K4: -456\n" +"K5: 456\n" +"K6: \"string\"\n" +"K7: 789\n" +"K8: 10000000000\n"; + +TEST_BEGIN(test_types) { + assert_emit_output(&emit_types, types_json, types_table); +} +TEST_END + +static void +emit_modal(emitter_t *emitter) { + int val = 123; + emitter_begin(emitter); + emitter_dict_begin(emitter, "j0", "T0"); + emitter_json_key(emitter, "j1"); + emitter_json_object_begin(emitter); + emitter_kv(emitter, "i1", "I1", emitter_type_int, &val); + emitter_json_kv(emitter, "i2", emitter_type_int, &val); + emitter_table_kv(emitter, "I3", emitter_type_int, &val); + emitter_table_dict_begin(emitter, "T1"); + emitter_kv(emitter, "i4", "I4", emitter_type_int, &val); + emitter_json_object_end(emitter); /* Close j1 */ + emitter_kv(emitter, "i5", "I5", emitter_type_int, &val); + emitter_table_dict_end(emitter); /* Close T1 */ + emitter_kv(emitter, "i6", "I6", emitter_type_int, &val); + emitter_dict_end(emitter); /* Close j0 / T0 */ + emitter_end(emitter); +} + +const char *modal_json = +"{\n" +"\t\"j0\": {\n" +"\t\t\"j1\": {\n" +"\t\t\t\"i1\": 123,\n" +"\t\t\t\"i2\": 123,\n" +"\t\t\t\"i4\": 123\n" +"\t\t},\n" +"\t\t\"i5\": 123,\n" +"\t\t\"i6\": 123\n" +"\t}\n" +"}\n"; + +const char *modal_table = +"T0\n" +" I1: 123\n" +" I3: 123\n" +" T1\n" +" I4: 123\n" +" I5: 123\n" +" I6: 123\n"; + +TEST_BEGIN(test_modal) { + assert_emit_output(&emit_modal, modal_json, modal_table); +} +TEST_END + +static void +emit_json_arr(emitter_t *emitter) { + int ival = 123; + + emitter_begin(emitter); + emitter_json_key(emitter, "dict"); + emitter_json_object_begin(emitter); + emitter_json_key(emitter, "arr"); + emitter_json_array_begin(emitter); + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "foo", emitter_type_int, &ival); + emitter_json_object_end(emitter); /* Close arr[0] */ + /* arr[1] and arr[2] are primitives. */ + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_object_begin(emitter); + emitter_json_kv(emitter, "bar", emitter_type_int, &ival); + emitter_json_kv(emitter, "baz", emitter_type_int, &ival); + emitter_json_object_end(emitter); /* Close arr[3]. */ + emitter_json_array_end(emitter); /* Close arr. */ + emitter_json_object_end(emitter); /* Close dict. 
*/ + emitter_end(emitter); +} + +static const char *json_array_json = +"{\n" +"\t\"dict\": {\n" +"\t\t\"arr\": [\n" +"\t\t\t{\n" +"\t\t\t\t\"foo\": 123\n" +"\t\t\t},\n" +"\t\t\t123,\n" +"\t\t\t123,\n" +"\t\t\t{\n" +"\t\t\t\t\"bar\": 123,\n" +"\t\t\t\t\"baz\": 123\n" +"\t\t\t}\n" +"\t\t]\n" +"\t}\n" +"}\n"; + +static const char *json_array_table = ""; + +TEST_BEGIN(test_json_arr) { + assert_emit_output(&emit_json_arr, json_array_json, json_array_table); +} +TEST_END + +static void +emit_json_nested_array(emitter_t *emitter) { + int ival = 123; + char *sval = "foo"; + emitter_begin(emitter); + emitter_json_array_begin(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_value(emitter, emitter_type_string, &sval); + emitter_json_value(emitter, emitter_type_int, &ival); + emitter_json_array_end(emitter); + emitter_json_array_begin(emitter); + emitter_json_array_end(emitter); + emitter_json_array_end(emitter); + emitter_end(emitter); +} + +static const char *json_nested_array_json = +"{\n" +"\t[\n" +"\t\t[\n" +"\t\t\t123,\n" +"\t\t\t\"foo\",\n" +"\t\t\t123,\n" +"\t\t\t\"foo\"\n" +"\t\t],\n" +"\t\t[\n" +"\t\t\t123\n" +"\t\t],\n" +"\t\t[\n" +"\t\t\t\"foo\",\n" +"\t\t\t123\n" +"\t\t],\n" +"\t\t[\n" +"\t\t]\n" +"\t]\n" +"}\n"; + +TEST_BEGIN(test_json_nested_arr) { + assert_emit_output(&emit_json_nested_array, json_nested_array_json, + json_array_table); +} +TEST_END + +static void +emit_table_row(emitter_t *emitter) { + emitter_begin(emitter); + emitter_row_t row; + emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}}; + abc.str_val = "ABC title"; + emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}}; + def.str_val = "DEF title"; + emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}}; + ghi.str_val = "GHI"; + + emitter_row_init(&row); + emitter_col_init(&abc, &row); + emitter_col_init(&def, &row); + emitter_col_init(&ghi, &row); + + emitter_table_row(emitter, &row); + + abc.type = emitter_type_int; + def.type = emitter_type_bool; + ghi.type = emitter_type_int; + + abc.int_val = 123; + def.bool_val = true; + ghi.int_val = 456; + emitter_table_row(emitter, &row); + + abc.int_val = 789; + def.bool_val = false; + ghi.int_val = 1011; + emitter_table_row(emitter, &row); + + abc.type = emitter_type_string; + abc.str_val = "a string"; + def.bool_val = false; + ghi.type = emitter_type_title; + ghi.str_val = "ghi"; + emitter_table_row(emitter, &row); + + emitter_end(emitter); +} + +static const char *table_row_json = +"{\n" +"}\n"; + +static const char *table_row_table = +"ABC title DEF title GHI\n" +"123 true 456\n" +"789 false 1011\n" +"\"a string\" false ghi\n"; + +TEST_BEGIN(test_table_row) { + assert_emit_output(&emit_table_row, table_row_json, table_row_table); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_dict, + test_table_printf, + test_nested_dict, + test_types, + test_modal, + test_json_arr, + test_json_nested_arr, + test_table_row); +} diff --git a/deps/jemalloc/test/unit/extent_quantize.c b/deps/jemalloc/test/unit/extent_quantize.c new file mode 100644 index 
0000000..0ca7a75 --- /dev/null +++ b/deps/jemalloc/test/unit/extent_quantize.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_small_extent_size) { + unsigned nbins, i; + size_t sz, extent_size; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + + /* + * Iterate over all small size classes, get their extent sizes, and + * verify that the quantized size is the same as the extent size. + */ + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nbins; i++) { + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, + NULL, 0), 0, "Unexpected mallctlbymib failure"); + assert_zu_eq(extent_size, + extent_size_quantize_floor(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + assert_zu_eq(extent_size, + extent_size_quantize_ceil(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + } +} +TEST_END + +TEST_BEGIN(test_large_extent_size) { + bool cache_oblivious; + unsigned nlextents, i; + size_t sz, extent_size_prev, ceil_prev; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + + /* + * Iterate over all large size classes, get their extent sizes, and + * verify that the quantized size is the same as the extent size. + */ + + sz = sizeof(bool); + assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nlextents; i++) { + size_t lextent_size, extent_size, floor, ceil; + + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, + &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); + extent_size = cache_oblivious ? 
lextent_size + PAGE : + lextent_size; + floor = extent_size_quantize_floor(extent_size); + ceil = extent_size_quantize_ceil(extent_size); + + assert_zu_eq(extent_size, floor, + "Extent quantization should be a no-op for precise size " + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); + assert_zu_eq(extent_size, ceil, + "Extent quantization should be a no-op for precise size " + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); + + if (i > 0) { + assert_zu_eq(extent_size_prev, + extent_size_quantize_floor(extent_size - PAGE), + "Floor should be a precise size"); + if (extent_size_prev < ceil_prev) { + assert_zu_eq(ceil_prev, extent_size, + "Ceiling should be a precise size " + "(extent_size_prev=%zu, ceil_prev=%zu, " + "extent_size=%zu)", extent_size_prev, + ceil_prev, extent_size); + } + } + if (i + 1 < nlextents) { + extent_size_prev = floor; + ceil_prev = extent_size_quantize_ceil(extent_size + + PAGE); + } + } +} +TEST_END + +TEST_BEGIN(test_monotonic) { +#define SZ_MAX ZU(4 * 1024 * 1024) + unsigned i; + size_t floor_prev, ceil_prev; + + floor_prev = 0; + ceil_prev = 0; + for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) { + size_t extent_size, floor, ceil; + + extent_size = i << LG_PAGE; + floor = extent_size_quantize_floor(extent_size); + ceil = extent_size_quantize_ceil(extent_size); + + assert_zu_le(floor, extent_size, + "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)", + floor, extent_size, ceil); + assert_zu_ge(ceil, extent_size, + "Ceiling should be >= (floor=%zu, extent_size=%zu, " + "ceil=%zu)", floor, extent_size, ceil); + + assert_zu_le(floor_prev, floor, "Floor should be monotonic " + "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)", + floor_prev, floor, extent_size, ceil); + assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " + "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)", + floor, extent_size, ceil_prev, ceil); + + floor_prev = floor; + ceil_prev = ceil; + } +} +TEST_END + +int +main(void) { + return test( + test_small_extent_size, + test_large_extent_size, + test_monotonic); +} diff --git a/deps/jemalloc/test/unit/extent_util.c b/deps/jemalloc/test/unit/extent_util.c new file mode 100644 index 0000000..97e55f0 --- /dev/null +++ b/deps/jemalloc/test/unit/extent_util.c @@ -0,0 +1,269 @@ +#include "test/jemalloc_test.h" + +#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do { \ + assert_d_eq(mallctl("experimental.utilization." node, \ + a, b, c, d), EINVAL, "Should fail when " why_inval); \ + assert_zu_eq(out_sz, out_sz_ref, \ + "Output size touched when given invalid arguments"); \ + assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content touched when given invalid arguments"); \ +} while (0) + +#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("query", a, b, c, d, why_inval) +#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval) \ + TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval) + +#define TEST_UTIL_VALID(node) do { \ + assert_d_eq(mallctl("experimental.utilization." 
node, \ + out, &out_sz, in, in_sz), 0, \ + "Should return 0 on correct arguments"); \ + assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \ + assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \ + "Output content should be changed"); \ +} while (0) + +#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query") + +#define TEST_MAX_SIZE (1 << 20) + +TEST_BEGIN(test_query) { + size_t sz; + /* + * Select some sizes that can span both small and large sizes, and are + * numerically unrelated to any size boundaries. + */ + for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; + sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) { + void *p = mallocx(sz, 0); + void **in = &p; + size_t in_sz = sizeof(const void *); + size_t out_sz = sizeof(void *) + sizeof(size_t) * 5; + void *out = mallocx(out_sz, 0); + void *out_ref = mallocx(out_sz, 0); + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, + "test pointer allocation failed"); + assert_ptr_not_null(out, + "test output allocation failed"); + assert_ptr_not_null(out_ref, + "test reference output allocation failed"); + +#define SLABCUR_READ(out) (*(void **)out) +#define COUNTS(out) ((size_t *)((void **)out + 1)) +#define NFREE_READ(out) COUNTS(out)[0] +#define NREGS_READ(out) COUNTS(out)[1] +#define SIZE_READ(out) COUNTS(out)[2] +#define BIN_NFREE_READ(out) COUNTS(out)[3] +#define BIN_NREGS_READ(out) COUNTS(out)[4] + + SLABCUR_READ(out) = NULL; + NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1; + BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1; + memcpy(out_ref, out, out_sz); + + /* Test invalid argument(s) errors */ + TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz, + "old is NULL"); + TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz, + "oldlenp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz, + "newp is NULL"); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0, + "newlen is zero"); + in_sz -= 1; + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, + "invalid newlen"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz, + "invalid *oldlenp"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + + /* Examine output for valid call */ + TEST_UTIL_VALID("query"); + assert_zu_le(sz, SIZE_READ(out), + "Extent size should be at least allocation size"); + assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0, + "Extent size should be a multiple of page size"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out), NREGS_READ(out), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out), SIZE_READ(out), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out), 0, + "Extent region count must be positive"); + assert_ptr_not_null(SLABCUR_READ(out), + "Current slab is null"); + assert_true(NFREE_READ(out) == 0 + || SLABCUR_READ(out) <= p, + "Allocation should follow first fit principle"); + if (config_stats) { + assert_zu_le(BIN_NFREE_READ(out), + BIN_NREGS_READ(out), + "Bin free count exceeded region count"); + assert_zu_ne(BIN_NREGS_READ(out), 0, + "Bin region count must be positive"); + assert_zu_le(NFREE_READ(out), + BIN_NFREE_READ(out), + "Extent free count exceeded bin free count"); + assert_zu_le(NREGS_READ(out), + BIN_NREGS_READ(out), + "Extent region count exceeded " + "bin region count"); + assert_zu_eq(BIN_NREGS_READ(out) + % NREGS_READ(out), 0, + "Bin region count isn't a multiple of " + "extent region count"); + assert_zu_le( + BIN_NFREE_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - NREGS_READ(out), + "Free count in other extents in 
the bin " + "exceeded region count in other extents " + "in the bin"); + assert_zu_le(NREGS_READ(out) - NFREE_READ(out), + BIN_NREGS_READ(out) - BIN_NFREE_READ(out), + "Extent utilized count exceeded " + "bin utilized count"); + } + } else { + assert_zu_eq(NFREE_READ(out), 0, + "Extent free count should be zero"); + assert_zu_eq(NREGS_READ(out), 1, + "Extent region count should be one"); + assert_ptr_null(SLABCUR_READ(out), + "Current slab must be null for large size classes"); + if (config_stats) { + assert_zu_eq(BIN_NFREE_READ(out), 0, + "Bin free count must be zero for " + "large sizes"); + assert_zu_eq(BIN_NREGS_READ(out), 0, + "Bin region count must be zero for " + "large sizes"); + } + } + +#undef BIN_NREGS_READ +#undef BIN_NFREE_READ +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ +#undef COUNTS +#undef SLABCUR_READ + + free(out_ref); + free(out); + free(p); + } +} +TEST_END + +TEST_BEGIN(test_batch) { + size_t sz; + /* + * Select some sizes that can span both small and large sizes, and are + * numerically unrelated to any size boundaries. + */ + for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS; + sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) { + void *p = mallocx(sz, 0); + void *q = mallocx(sz, 0); + void *in[] = {p, q}; + size_t in_sz = sizeof(const void *) * 2; + size_t out[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz = sizeof(size_t) * 6; + size_t out_ref[] = {-1, -1, -1, -1, -1, -1}; + size_t out_sz_ref = out_sz; + + assert_ptr_not_null(p, "test pointer allocation failed"); + assert_ptr_not_null(q, "test pointer allocation failed"); + + /* Test invalid argument(s) errors */ + TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz, + "old is NULL"); + TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz, + "oldlenp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz, + "newp is NULL"); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0, + "newlen is zero"); + in_sz -= 1; + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "newlen is not an exact multiple"); + in_sz += 1; + out_sz_ref = out_sz -= 2 * sizeof(size_t); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp is not an exact multiple"); + out_sz_ref = out_sz += 2 * sizeof(size_t); + in_sz -= sizeof(const void *); + TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz, + "*oldlenp and newlen do not match"); + in_sz += sizeof(const void *); + + /* Examine output for valid calls */ +#define TEST_EQUAL_REF(i, message) \ + assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3), 0, message) + +#define NFREE_READ(out, i) out[(i) * 3] +#define NREGS_READ(out, i) out[(i) * 3 + 1] +#define SIZE_READ(out, i) out[(i) * 3 + 2] + + out_sz_ref = out_sz /= 2; + in_sz /= 2; + TEST_UTIL_BATCH_VALID; + assert_zu_le(sz, SIZE_READ(out, 0), + "Extent size should be at least allocation size"); + assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0, + "Extent size should be a multiple of page size"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0), + "Extent free count exceeded region count"); + assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0), + "Extent region count exceeded size"); + assert_zu_ne(NREGS_READ(out, 0), 0, + "Extent region count must be positive"); + } else { + assert_zu_eq(NFREE_READ(out, 0), 0, + "Extent free count should be zero"); + assert_zu_eq(NREGS_READ(out, 0), 1, + "Extent region count should be one"); + } + TEST_EQUAL_REF(1, + "Should not overwrite content beyond what's needed"); + in_sz *= 2; + out_sz_ref = out_sz *= 2; + + memcpy(out_ref, out, 3 * sizeof(size_t)); + 
TEST_UTIL_BATCH_VALID; + TEST_EQUAL_REF(0, "Statistics should be stable across calls"); + if (sz <= SC_SMALL_MAXCLASS) { + assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1), + "Extent free count exceeded region count"); + } else { + assert_zu_eq(NFREE_READ(out, 0), 0, + "Extent free count should be zero"); + } + assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1), + "Extent region count should be same for same region size"); + assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1), + "Extent size should be same for same region size"); + +#undef SIZE_READ +#undef NREGS_READ +#undef NFREE_READ + +#undef TEST_EQUAL_REF + + free(q); + free(p); + } +} +TEST_END + +int +main(void) { + assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE, + "Test case cannot cover large classes"); + return test(test_query, test_batch); +} diff --git a/deps/jemalloc/test/unit/fork.c b/deps/jemalloc/test/unit/fork.c new file mode 100644 index 0000000..b169075 --- /dev/null +++ b/deps/jemalloc/test/unit/fork.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +#ifndef _WIN32 +#include +#endif + +#ifndef _WIN32 +static void +wait_for_child_exit(int pid) { + int status; + while (true) { + if (waitpid(pid, &status, 0) == -1) { + test_fail("Unexpected waitpid() failure."); + } + if (WIFSIGNALED(status)) { + test_fail("Unexpected child termination due to " + "signal %d", WTERMSIG(status)); + break; + } + if (WIFEXITED(status)) { + if (WEXITSTATUS(status) != 0) { + test_fail("Unexpected child exit value %d", + WEXITSTATUS(status)); + } + break; + } + } +} +#endif + +TEST_BEGIN(test_fork) { +#ifndef _WIN32 + void *p; + pid_t pid; + + /* Set up a manually managed arena for test. */ + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Migrate to the new arena. */ + unsigned old_arena_ind; + sz = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + pid = fork(); + + free(p); + + p = malloc(64); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + free(p); + + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure"); + } else if (pid == 0) { + /* Child. */ + _exit(0); + } else { + wait_for_child_exit(pid); + } +#else + test_skip("fork(2) is irrelevant to Windows"); +#endif +} +TEST_END + +#ifndef _WIN32 +static void * +do_fork_thd(void *arg) { + malloc(1); + int pid = fork(); + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure"); + } else if (pid == 0) { + /* Child. */ + char *args[] = {"true", NULL}; + execvp(args[0], args); + test_fail("Exec failed"); + } else { + /* Parent */ + wait_for_child_exit(pid); + } + return NULL; +} +#endif + +#ifndef _WIN32 +static void +do_test_fork_multithreaded() { + thd_t child; + thd_create(&child, do_fork_thd, NULL); + do_fork_thd(NULL); + thd_join(child, NULL); +} +#endif + +TEST_BEGIN(test_fork_multithreaded) { +#ifndef _WIN32 + /* + * We've seen bugs involving hanging on arenas_lock (though the same + * class of bugs can happen on any mutex). The bugs are intermittent + * though, so we want to run the test multiple times. Since we hold the + * arenas lock only early in the process lifetime, we can't just run + * this test in a loop (since, after all the arenas are initialized, we + * won't acquire arenas_lock any further). 
We therefore repeat the test + * with multiple processes. + */ + for (int i = 0; i < 100; i++) { + int pid = fork(); + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure,"); + } else if (pid == 0) { + /* Child. */ + do_test_fork_multithreaded(); + _exit(0); + } else { + wait_for_child_exit(pid); + } + } +#else + test_skip("fork(2) is irrelevant to Windows"); +#endif +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_fork, + test_fork_multithreaded); +} diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c new file mode 100644 index 0000000..7cc034f --- /dev/null +++ b/deps/jemalloc/test/unit/hash.c @@ -0,0 +1,173 @@ +/* + * This file is based on code that is part of SMHasher + * (https://code.google.com/p/smhasher/), and is subject to the MIT license + * (http://www.opensource.org/licenses/mit-license.php). Both email addresses + * associated with the source code's revision history belong to Austin Appleby, + * and the revision history ranges from 2010 to 2012. Therefore the copyright + * and license are here taken to be: + * + * Copyright (c) 2010-2012 Austin Appleby + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "test/jemalloc_test.h" +#include "jemalloc/internal/hash.h" + +typedef enum { + hash_variant_x86_32, + hash_variant_x86_128, + hash_variant_x64_128 +} hash_variant_t; + +static int +hash_variant_bits(hash_variant_t variant) { + switch (variant) { + case hash_variant_x86_32: return 32; + case hash_variant_x86_128: return 128; + case hash_variant_x64_128: return 128; + default: not_reached(); + } +} + +static const char * +hash_variant_string(hash_variant_t variant) { + switch (variant) { + case hash_variant_x86_32: return "hash_x86_32"; + case hash_variant_x86_128: return "hash_x86_128"; + case hash_variant_x64_128: return "hash_x64_128"; + default: not_reached(); + } +} + +#define KEY_SIZE 256 +static void +hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { + const int hashbytes = hash_variant_bits(variant) / 8; + const int hashes_size = hashbytes * 256; + VARIABLE_ARRAY(uint8_t, hashes, hashes_size); + VARIABLE_ARRAY(uint8_t, final, hashbytes); + unsigned i; + uint32_t computed, expected; + + memset(key, 0, KEY_SIZE); + memset(hashes, 0, hashes_size); + memset(final, 0, hashbytes); + + /* + * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the + * seed. 
+ */ + for (i = 0; i < 256; i++) { + key[i] = (uint8_t)i; + switch (variant) { + case hash_variant_x86_32: { + uint32_t out; + out = hash_x86_32(key, i, 256-i); + memcpy(&hashes[i*hashbytes], &out, hashbytes); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } default: not_reached(); + } + } + + /* Hash the result array. */ + switch (variant) { + case hash_variant_x86_32: { + uint32_t out = hash_x86_32(hashes, hashes_size, 0); + memcpy(final, &out, sizeof(out)); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(hashes, hashes_size, 0, out); + memcpy(final, out, sizeof(out)); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(hashes, hashes_size, 0, out); + memcpy(final, out, sizeof(out)); + break; + } default: not_reached(); + } + + computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | + (final[3] << 24); + + switch (variant) { +#ifdef JEMALLOC_BIG_ENDIAN + case hash_variant_x86_32: expected = 0x6213303eU; break; + case hash_variant_x86_128: expected = 0x266820caU; break; + case hash_variant_x64_128: expected = 0xcc622b6fU; break; +#else + case hash_variant_x86_32: expected = 0xb0f57ee3U; break; + case hash_variant_x86_128: expected = 0xb3ece62aU; break; + case hash_variant_x64_128: expected = 0x6384ba69U; break; +#endif + default: not_reached(); + } + + assert_u32_eq(computed, expected, + "Hash mismatch for %s(): expected %#x but got %#x", + hash_variant_string(variant), expected, computed); +} + +static void +hash_variant_verify(hash_variant_t variant) { +#define MAX_ALIGN 16 + uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; + unsigned i; + + for (i = 0; i < MAX_ALIGN; i++) { + hash_variant_verify_key(variant, &key[i]); + } +#undef MAX_ALIGN +} +#undef KEY_SIZE + +TEST_BEGIN(test_hash_x86_32) { + hash_variant_verify(hash_variant_x86_32); +} +TEST_END + +TEST_BEGIN(test_hash_x86_128) { + hash_variant_verify(hash_variant_x86_128); +} +TEST_END + +TEST_BEGIN(test_hash_x64_128) { + hash_variant_verify(hash_variant_x64_128); +} +TEST_END + +int +main(void) { + return test( + test_hash_x86_32, + test_hash_x86_128, + test_hash_x64_128); +} diff --git a/deps/jemalloc/test/unit/hook.c b/deps/jemalloc/test/unit/hook.c new file mode 100644 index 0000000..72fcc43 --- /dev/null +++ b/deps/jemalloc/test/unit/hook.c @@ -0,0 +1,580 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/hook.h" + +static void *arg_extra; +static int arg_type; +static void *arg_result; +static void *arg_address; +static size_t arg_old_usize; +static size_t arg_new_usize; +static uintptr_t arg_result_raw; +static uintptr_t arg_args_raw[4]; + +static int call_count = 0; + +static void +reset_args() { + arg_extra = NULL; + arg_type = 12345; + arg_result = NULL; + arg_address = NULL; + arg_old_usize = 0; + arg_new_usize = 0; + arg_result_raw = 0; + memset(arg_args_raw, 77, sizeof(arg_args_raw)); +} + +static void +alloc_free_size(size_t sz) { + void *ptr = mallocx(1, 0); + free(ptr); + ptr = mallocx(1, 0); + free(ptr); + ptr = mallocx(1, MALLOCX_TCACHE_NONE); + dallocx(ptr, MALLOCX_TCACHE_NONE); +} + +/* + * We want to support a degree of user reentrancy. This tests a variety of + * allocation scenarios. + */ +static void +be_reentrant() { + /* Let's make sure the tcache is non-empty if enabled. 
*/ + alloc_free_size(1); + alloc_free_size(1024); + alloc_free_size(64 * 1024); + alloc_free_size(256 * 1024); + alloc_free_size(1024 * 1024); + + /* Some reallocation. */ + void *ptr = mallocx(129, 0); + ptr = rallocx(ptr, 130, 0); + free(ptr); + + ptr = mallocx(2 * 1024 * 1024, 0); + free(ptr); + ptr = mallocx(1 * 1024 * 1024, 0); + ptr = rallocx(ptr, 2 * 1024 * 1024, 0); + free(ptr); + + ptr = mallocx(1, 0); + ptr = rallocx(ptr, 1000, 0); + free(ptr); +} + +static void +set_args_raw(uintptr_t *args_raw, int nargs) { + memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs); +} + +static void +assert_args_raw(uintptr_t *args_raw_expected, int nargs) { + int cmp = memcmp(args_raw_expected, arg_args_raw, + sizeof(uintptr_t) * nargs); + assert_d_eq(cmp, 0, "Raw args mismatch"); +} + +static void +reset() { + call_count = 0; + reset_args(); +} + +static void +test_alloc_hook(void *extra, hook_alloc_t type, void *result, + uintptr_t result_raw, uintptr_t args_raw[3]) { + call_count++; + arg_extra = extra; + arg_type = (int)type; + arg_result = result; + arg_result_raw = result_raw; + set_args_raw(args_raw, 3); + be_reentrant(); +} + +static void +test_dalloc_hook(void *extra, hook_dalloc_t type, void *address, + uintptr_t args_raw[3]) { + call_count++; + arg_extra = extra; + arg_type = (int)type; + arg_address = address; + set_args_raw(args_raw, 3); + be_reentrant(); +} + +static void +test_expand_hook(void *extra, hook_expand_t type, void *address, + size_t old_usize, size_t new_usize, uintptr_t result_raw, + uintptr_t args_raw[4]) { + call_count++; + arg_extra = extra; + arg_type = (int)type; + arg_address = address; + arg_old_usize = old_usize; + arg_new_usize = new_usize; + arg_result_raw = result_raw; + set_args_raw(args_raw, 4); + be_reentrant(); +} + +TEST_BEGIN(test_hooks_basic) { + /* Just verify that the record their arguments correctly. */ + hooks_t hooks = { + &test_alloc_hook, &test_dalloc_hook, &test_expand_hook, + (void *)111}; + void *handle = hook_install(TSDN_NULL, &hooks); + uintptr_t args_raw[4] = {10, 20, 30, 40}; + + /* Alloc */ + reset_args(); + hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333, + args_raw); + assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer"); + assert_d_eq((int)hook_alloc_posix_memalign, arg_type, + "Passed wrong alloc type"); + assert_ptr_eq((void *)222, arg_result, "Passed wrong result address"); + assert_u64_eq(333, arg_result_raw, "Passed wrong result"); + assert_args_raw(args_raw, 3); + + /* Dalloc */ + reset_args(); + hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw); + assert_d_eq((int)hook_dalloc_sdallocx, arg_type, + "Passed wrong dalloc type"); + assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); + assert_ptr_eq((void *)222, arg_address, "Passed wrong address"); + assert_args_raw(args_raw, 3); + + /* Expand */ + reset_args(); + hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555, + args_raw); + assert_d_eq((int)hook_expand_xallocx, arg_type, + "Passed wrong expand type"); + assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer"); + assert_ptr_eq((void *)222, arg_address, "Passed wrong address"); + assert_zu_eq(333, arg_old_usize, "Passed wrong old usize"); + assert_zu_eq(444, arg_new_usize, "Passed wrong new usize"); + assert_zu_eq(555, arg_result_raw, "Passed wrong result"); + assert_args_raw(args_raw, 4); + + hook_remove(TSDN_NULL, handle); +} +TEST_END + +TEST_BEGIN(test_hooks_null) { + /* Null hooks should be ignored, not crash. 
*/ + hooks_t hooks1 = {NULL, NULL, NULL, NULL}; + hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL}; + hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL}; + hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL}; + + void *handle1 = hook_install(TSDN_NULL, &hooks1); + void *handle2 = hook_install(TSDN_NULL, &hooks2); + void *handle3 = hook_install(TSDN_NULL, &hooks3); + void *handle4 = hook_install(TSDN_NULL, &hooks4); + + assert_ptr_ne(handle1, NULL, "Hook installation failed"); + assert_ptr_ne(handle2, NULL, "Hook installation failed"); + assert_ptr_ne(handle3, NULL, "Hook installation failed"); + assert_ptr_ne(handle4, NULL, "Hook installation failed"); + + uintptr_t args_raw[4] = {10, 20, 30, 40}; + + call_count = 0; + hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw); + assert_d_eq(call_count, 1, "Called wrong number of times"); + + call_count = 0; + hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw); + assert_d_eq(call_count, 1, "Called wrong number of times"); + + call_count = 0; + hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw); + assert_d_eq(call_count, 1, "Called wrong number of times"); + + hook_remove(TSDN_NULL, handle1); + hook_remove(TSDN_NULL, handle2); + hook_remove(TSDN_NULL, handle3); + hook_remove(TSDN_NULL, handle4); +} +TEST_END + +TEST_BEGIN(test_hooks_remove) { + hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + call_count = 0; + uintptr_t args_raw[4] = {10, 20, 30, 40}; + hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw); + assert_d_eq(call_count, 1, "Hook not invoked"); + + call_count = 0; + hook_remove(TSDN_NULL, handle); + hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL); + assert_d_eq(call_count, 0, "Hook invoked after removal"); + +} +TEST_END + +TEST_BEGIN(test_hooks_alloc_simple) { + /* "Simple" in the sense that we're not in a realloc variant. */ + hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + + /* Stop malloc from being optimized away. 
*/ + volatile int err; + void *volatile ptr; + + /* malloc */ + reset(); + ptr = malloc(1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); + free(ptr); + + /* posix_memalign */ + reset(); + err = posix_memalign((void **)&ptr, 1024, 1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_posix_memalign, + "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument"); + assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument"); + free(ptr); + + /* aligned_alloc */ + reset(); + ptr = aligned_alloc(1024, 1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc, + "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); + free(ptr); + + /* calloc */ + reset(); + ptr = calloc(11, 13); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument"); + free(ptr); + + /* memalign */ +#ifdef JEMALLOC_OVERRIDE_MEMALIGN + reset(); + ptr = memalign(1024, 1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); + free(ptr); +#endif /* JEMALLOC_OVERRIDE_MEMALIGN */ + + /* valloc */ +#ifdef JEMALLOC_OVERRIDE_VALLOC + reset(); + ptr = valloc(1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); + free(ptr); +#endif /* JEMALLOC_OVERRIDE_VALLOC */ + + /* mallocx */ + reset(); + ptr = mallocx(1, MALLOCX_LG_ALIGN(10)); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, 
(uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1], + "Wrong flags"); + free(ptr); + + hook_remove(TSDN_NULL, handle); +} +TEST_END + +TEST_BEGIN(test_hooks_dalloc_simple) { + /* "Simple" in the sense that we're not in a realloc variant. */ + hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + + void *volatile ptr; + + /* free() */ + reset(); + ptr = malloc(1); + free(ptr); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); + + /* dallocx() */ + reset(); + ptr = malloc(1); + dallocx(ptr, MALLOCX_TCACHE_NONE); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); + assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1], + "Wrong raw arg"); + + /* sdallocx() */ + reset(); + ptr = malloc(1); + sdallocx(ptr, 1, MALLOCX_TCACHE_NONE); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); + assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg"); + assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2], + "Wrong raw arg"); + + hook_remove(TSDN_NULL, handle); +} +TEST_END + +TEST_BEGIN(test_hooks_expand_simple) { + /* "Simple" in the sense that we're not in a realloc variant. 
*/ + hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + + void *volatile ptr; + + /* xallocx() */ + reset(); + ptr = malloc(1); + size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded"); + assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize"); + assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize"); + assert_u64_eq(new_usize, arg_result_raw, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg"); + assert_u64_eq(100, arg_args_raw[1], "Wrong arg"); + assert_u64_eq(200, arg_args_raw[2], "Wrong arg"); + assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg"); + + hook_remove(TSDN_NULL, handle); +} +TEST_END + +TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) { + hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, + &test_expand_hook, (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + + void *volatile ptr; + + /* realloc(NULL, size) as malloc */ + reset(); + ptr = realloc(NULL, 1); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument"); + free(ptr); + + /* realloc(ptr, 0) as free */ + ptr = malloc(1); + reset(); + realloc(ptr, 0); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_dalloc_realloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong pointer freed"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg"); + assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg"); + + /* realloc(NULL, 0) as malloc(0) */ + reset(); + ptr = realloc(NULL, 0); + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type"); + assert_ptr_eq(ptr, arg_result, "Wrong result"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument"); + free(ptr); + + hook_remove(TSDN_NULL, handle); +} +TEST_END + +static void +do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags, + int expand_type, int dalloc_type) { + hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook, + &test_expand_hook, (void *)123}; + void *handle = hook_install(TSDN_NULL, &hooks); + assert_ptr_ne(handle, NULL, "Hook installation failed"); + + void *volatile ptr; + void *volatile ptr2; + + /* Realloc in-place, small. 
*/ + ptr = malloc(129); + reset(); + ptr2 = ralloc(ptr, 130, flags); + assert_ptr_eq(ptr, ptr2, "Small realloc moved"); + + assert_d_eq(call_count, 1, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, expand_type, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong address"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument"); + free(ptr); + + /* + * Realloc in-place, large. Since we can't guarantee the large case + * across all platforms, we stay resilient to moving results. + */ + ptr = malloc(2 * 1024 * 1024); + free(ptr); + ptr2 = malloc(1 * 1024 * 1024); + reset(); + ptr = ralloc(ptr2, 2 * 1024 * 1024, flags); + /* ptr is the new address, ptr2 is the old address. */ + if (ptr == ptr2) { + assert_d_eq(call_count, 1, "Hook not called"); + assert_d_eq(arg_type, expand_type, "Wrong hook type"); + } else { + assert_d_eq(call_count, 2, "Wrong hooks called"); + assert_ptr_eq(ptr, arg_result, "Wrong address"); + assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); + } + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_ptr_eq(ptr2, arg_address, "Wrong address"); + assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], + "Wrong argument"); + free(ptr); + + /* Realloc with move, small. */ + ptr = malloc(8); + reset(); + ptr2 = ralloc(ptr, 128, flags); + assert_ptr_ne(ptr, ptr2, "Small realloc didn't move"); + + assert_d_eq(call_count, 2, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong address"); + assert_ptr_eq(ptr2, arg_result, "Wrong address"); + assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument"); + free(ptr2); + + /* Realloc with move, large. */ + ptr = malloc(1); + reset(); + ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags); + assert_ptr_ne(ptr, ptr2, "Large realloc didn't move"); + + assert_d_eq(call_count, 2, "Hook not called"); + assert_ptr_eq(arg_extra, (void *)123, "Wrong extra"); + assert_d_eq(arg_type, dalloc_type, "Wrong hook type"); + assert_ptr_eq(ptr, arg_address, "Wrong address"); + assert_ptr_eq(ptr2, arg_result, "Wrong address"); + assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw, + "Wrong raw result"); + assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument"); + assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1], + "Wrong argument"); + free(ptr2); + + hook_remove(TSDN_NULL, handle); +} + +static void * +realloc_wrapper(void *ptr, size_t size, UNUSED int flags) { + return realloc(ptr, size); +} + +TEST_BEGIN(test_hooks_realloc) { + do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc, + hook_dalloc_realloc); +} +TEST_END + +TEST_BEGIN(test_hooks_rallocx) { + do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx, + hook_dalloc_rallocx); +} +TEST_END + +int +main(void) { + /* We assert on call counts. 
*/ + return test_no_reentrancy( + test_hooks_basic, + test_hooks_null, + test_hooks_remove, + test_hooks_alloc_simple, + test_hooks_dalloc_simple, + test_hooks_expand_simple, + test_hooks_realloc_as_malloc_or_free, + test_hooks_realloc, + test_hooks_rallocx); +} diff --git a/deps/jemalloc/test/unit/huge.c b/deps/jemalloc/test/unit/huge.c new file mode 100644 index 0000000..ab72cf0 --- /dev/null +++ b/deps/jemalloc/test/unit/huge.c @@ -0,0 +1,108 @@ +#include "test/jemalloc_test.h" + +/* Threshold: 2 << 20 = 2097152. */ +const char *malloc_conf = "oversize_threshold:2097152"; + +#define HUGE_SZ (2 << 20) +#define SMALL_SZ (8) + +TEST_BEGIN(huge_bind_thread) { + unsigned arena1, arena2; + size_t sz = sizeof(unsigned); + + /* Bind to a manual arena. */ + assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, + "Failed to create arena"); + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1, + sizeof(arena1)), 0, "Fail to bind thread"); + + void *ptr = mallocx(HUGE_SZ, 0); + assert_ptr_not_null(ptr, "Fail to allocate huge size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, + sizeof(ptr)), 0, "Unexpected mallctl() failure"); + assert_u_eq(arena1, arena2, "Wrong arena used after binding"); + dallocx(ptr, 0); + + /* Switch back to arena 0. */ + test_skip_if(have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)); + arena2 = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2, + sizeof(arena2)), 0, "Fail to bind thread"); + ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, + sizeof(ptr)), 0, "Unexpected mallctl() failure"); + assert_u_eq(arena2, 0, "Wrong arena used after binding"); + dallocx(ptr, MALLOCX_TCACHE_NONE); + + /* Then huge allocation should use the huge arena. 
*/ + ptr = mallocx(HUGE_SZ, 0); + assert_ptr_not_null(ptr, "Fail to allocate huge size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, + sizeof(ptr)), 0, "Unexpected mallctl() failure"); + assert_u_ne(arena2, 0, "Wrong arena used after binding"); + assert_u_ne(arena1, arena2, "Wrong arena used after binding"); + dallocx(ptr, 0); +} +TEST_END + +TEST_BEGIN(huge_mallocx) { + unsigned arena1, arena2; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0, + "Failed to create arena"); + void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1)); + assert_ptr_not_null(huge, "Fail to allocate huge size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge, + sizeof(huge)), 0, "Unexpected mallctl() failure"); + assert_u_eq(arena1, arena2, "Wrong arena used for mallocx"); + dallocx(huge, MALLOCX_ARENA(arena1)); + + void *huge2 = mallocx(HUGE_SZ, 0); + assert_ptr_not_null(huge, "Fail to allocate huge size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2, + sizeof(huge2)), 0, "Unexpected mallctl() failure"); + assert_u_ne(arena1, arena2, + "Huge allocation should not come from the manual arena."); + assert_u_ne(arena2, 0, + "Huge allocation should not come from the arena 0."); + dallocx(huge2, 0); +} +TEST_END + +TEST_BEGIN(huge_allocation) { + unsigned arena1, arena2; + + void *ptr = mallocx(HUGE_SZ, 0); + assert_ptr_not_null(ptr, "Fail to allocate huge size"); + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); + assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0"); + dallocx(ptr, 0); + + ptr = mallocx(HUGE_SZ >> 1, 0); + assert_ptr_not_null(ptr, "Fail to allocate half huge size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, + sizeof(ptr)), 0, "Unexpected mallctl() failure"); + assert_u_ne(arena1, arena2, "Wrong arena used for half huge"); + dallocx(ptr, 0); + + ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(ptr, "Fail to allocate small size"); + assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, + sizeof(ptr)), 0, "Unexpected mallctl() failure"); + assert_u_ne(arena1, arena2, + "Huge and small should be from different arenas"); + dallocx(ptr, 0); +} +TEST_END + +int +main(void) { + return test( + huge_allocation, + huge_mallocx, + huge_bind_thread); +} diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c new file mode 100644 index 0000000..57e3ad4 --- /dev/null +++ b/deps/jemalloc/test/unit/junk.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; +static large_dalloc_junk_t *large_dalloc_junk_orig; +static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig; +static void *watch_for_junking; +static bool saw_junking; + +static void +watch_junking(void *p) { + watch_for_junking = p; + saw_junking = false; +} + +static void +arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) { + size_t i; + + arena_dalloc_junk_small_orig(ptr, bin_info); + for (i = 0; i < bin_info->reg_size; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, bin_info->reg_size); + } + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +large_dalloc_junk_intercept(void *ptr, size_t usize) { + size_t i; + + large_dalloc_junk_orig(ptr, usize); + for (i = 0; i < 
usize; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, usize); + } + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) { + large_dalloc_maybe_junk_orig(ptr, usize); + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +test_junk(size_t sz_min, size_t sz_max) { + uint8_t *s; + size_t sz_prev, sz, i; + + if (opt_junk_free) { + arena_dalloc_junk_small_orig = arena_dalloc_junk_small; + arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; + large_dalloc_junk_orig = large_dalloc_junk; + large_dalloc_junk = large_dalloc_junk_intercept; + large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept; + } + + sz_prev = 0; + s = (uint8_t *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_u_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_u_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + if (opt_junk_alloc) { + assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, + "Newly allocated byte %zu/%zu isn't " + "junk-filled", i, sz); + } + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + uint8_t *t; + watch_junking(s); + t = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)t, + "Unexpected rallocx() failure"); + assert_zu_ge(sallocx(t, 0), sz+1, + "Unexpectedly small rallocx() result"); + if (!background_thread_enabled()) { + assert_ptr_ne(s, t, + "Unexpected in-place rallocx()"); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be " + "junk-filled", sz); + } + s = t; + } + } + + watch_junking(s); + dallocx(s, 0); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", sz); + + if (opt_junk_free) { + arena_dalloc_junk_small = arena_dalloc_junk_small_orig; + large_dalloc_junk = large_dalloc_junk_orig; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig; + } +} + +TEST_BEGIN(test_junk_small) { + test_skip_if(!config_fill); + test_junk(1, SC_SMALL_MAXCLASS - 1); +} +TEST_END + +TEST_BEGIN(test_junk_large) { + test_skip_if(!config_fill); + test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1))); +} +TEST_END + +int +main(void) { + return test( + test_junk_small, + test_junk_large); +} diff --git a/deps/jemalloc/test/unit/junk.sh b/deps/jemalloc/test/unit/junk.sh new file mode 100644 index 0000000..97cd8ca --- /dev/null +++ b/deps/jemalloc/test/unit/junk.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:true" +fi diff --git a/deps/jemalloc/test/unit/junk_alloc.c b/deps/jemalloc/test/unit/junk_alloc.c new file mode 100644 index 0000000..a442a0c --- /dev/null +++ b/deps/jemalloc/test/unit/junk_alloc.c @@ -0,0 +1 @@ +#include "junk.c" diff --git a/deps/jemalloc/test/unit/junk_alloc.sh b/deps/jemalloc/test/unit/junk_alloc.sh new file mode 100644 index 0000000..e1008c2 --- /dev/null +++ b/deps/jemalloc/test/unit/junk_alloc.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:alloc" +fi diff --git a/deps/jemalloc/test/unit/junk_free.c 
b/deps/jemalloc/test/unit/junk_free.c new file mode 100644 index 0000000..a442a0c --- /dev/null +++ b/deps/jemalloc/test/unit/junk_free.c @@ -0,0 +1 @@ +#include "junk.c" diff --git a/deps/jemalloc/test/unit/junk_free.sh b/deps/jemalloc/test/unit/junk_free.sh new file mode 100644 index 0000000..402196c --- /dev/null +++ b/deps/jemalloc/test/unit/junk_free.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:free" +fi diff --git a/deps/jemalloc/test/unit/log.c b/deps/jemalloc/test/unit/log.c new file mode 100644 index 0000000..a52bd73 --- /dev/null +++ b/deps/jemalloc/test/unit/log.c @@ -0,0 +1,193 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/log.h" + +static void +expect_no_logging(const char *names) { + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l2 = LOG_VAR_INIT("l2"); + log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); + + strcpy(log_var_names, names); + + int count = 0; + + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l2) + count++; + log_do_end(log_l2) + + log_do_begin(log_l2_a) + count++; + log_do_end(log_l2_a) + } + assert_d_eq(count, 0, "Disabled logging not ignored!"); +} + +TEST_BEGIN(test_log_disabled) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + expect_no_logging(""); + expect_no_logging("abc"); + expect_no_logging("a.b.c"); + expect_no_logging("l12"); + expect_no_logging("l123|a456|b789"); + expect_no_logging("|||"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_direct) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); + log_var_t log_l2 = LOG_VAR_INIT("l2"); + + int count; + + count = 0; + strcpy(log_var_names, "l1"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + } + assert_d_eq(count, 10, "Mis-logged!"); + + count = 0; + strcpy(log_var_names, "l1.a"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + } + assert_d_eq(count, 10, "Mis-logged!"); + + count = 0; + strcpy(log_var_names, "l1.a|abc|l2|def"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + + log_do_begin(log_l2) + count++; + log_do_end(log_l2) + } + assert_d_eq(count, 20, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_indirect) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + strcpy(log_var_names, "l0|l1|abc|l2.b|def"); + + /* On. */ + log_var_t log_l1 = LOG_VAR_INIT("l1"); + /* Off. */ + log_var_t log_l1a = LOG_VAR_INIT("l1a"); + /* On. */ + log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); + /* Off. */ + log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); + /* On. */ + log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a"); + /* On. */ + log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b"); + + /* 4 are on total, so should sum to 40. 
*/ + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l1a) + count++; + log_do_end(log_l1a) + + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + + log_do_begin(log_l2_a) + count++; + log_do_end(log_l2_a) + + log_do_begin(log_l2_b_a) + count++; + log_do_end(log_l2_b_a) + + log_do_begin(log_l2_b_b) + count++; + log_do_end(log_l2_b_b) + } + + assert_d_eq(count, 40, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_global) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + strcpy(log_var_names, "abc|.|def"); + + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a"); + + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l2_a_a) + count++; + log_do_end(log_l2_a_a) + } + assert_d_eq(count, 20, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_logs_if_no_init) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, false, ATOMIC_RELAXED); + + log_var_t l = LOG_VAR_INIT("definitely.not.enabled"); + + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(l) + count++; + log_do_end(l) + } + assert_d_eq(count, 0, "Logging shouldn't happen if not initialized."); +} +TEST_END + +/* + * This really just checks to make sure that this usage compiles; we don't have + * any test code to run. + */ +TEST_BEGIN(test_log_only_format_string) { + if (false) { + LOG("log_str", "No arguments follow this format string."); + } +} +TEST_END + +int +main(void) { + return test( + test_log_disabled, + test_log_enabled_direct, + test_log_enabled_indirect, + test_log_enabled_global, + test_logs_if_no_init, + test_log_only_format_string); +} diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c new file mode 100644 index 0000000..3a75ac0 --- /dev/null +++ b/deps/jemalloc/test/unit/mallctl.c @@ -0,0 +1,888 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/hook.h" +#include "jemalloc/internal/util.h" + +TEST_BEGIN(test_mallctl_errors) { + uint64_t epoch; + size_t sz; + + assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, + "mallctl() should return ENOENT for non-existent names"); + + assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, "mallctl() should return EPERM on attempt to write " + "read-only value"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)-1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)+1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_errors) { + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, + "mallctlnametomib() should return ENOENT for non-existent names"); +} +TEST_END + +TEST_BEGIN(test_mallctlbymib_errors) { + uint64_t epoch; + size_t sz; + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("version", mib, &miblen), 
0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", + strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + "attempt to write read-only value"); + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, + sizeof(epoch)-1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, + sizeof(epoch)+1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctl_read_write) { + uint64_t old_epoch, new_epoch; + size_t sz = sizeof(old_epoch); + + /* Blind. */ + assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read. */ + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Write. */ + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read+write. */ + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, + (void *)&new_epoch, sizeof(new_epoch)), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_short_mib) { + size_t mib[4]; + size_t miblen; + + miblen = 3; + mib[3] = 42; + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + assert_zu_eq(miblen, 3, "Unexpected mib output length"); + assert_zu_eq(mib[3], 42, + "mallctlnametomib() wrote past the end of the input mib"); +} +TEST_END + +TEST_BEGIN(test_mallctl_config) { +#define TEST_MALLCTL_CONFIG(config, t) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_b_eq(oldval, config_##config, "Incorrect config value"); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_CONFIG(cache_oblivious, bool); + TEST_MALLCTL_CONFIG(debug, bool); + TEST_MALLCTL_CONFIG(fill, bool); + TEST_MALLCTL_CONFIG(lazy_lock, bool); + TEST_MALLCTL_CONFIG(malloc_conf, const char *); + TEST_MALLCTL_CONFIG(prof, bool); + TEST_MALLCTL_CONFIG(prof_libgcc, bool); + TEST_MALLCTL_CONFIG(prof_libunwind, bool); + TEST_MALLCTL_CONFIG(stats, bool); + TEST_MALLCTL_CONFIG(utrace, bool); + TEST_MALLCTL_CONFIG(xmalloc, bool); + +#undef TEST_MALLCTL_CONFIG +} +TEST_END + +TEST_BEGIN(test_mallctl_opt) { + bool config_always = true; + +#define TEST_MALLCTL_OPT(t, opt, config) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 
0 : ENOENT; \ + int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ + 0); \ + assert_d_eq(result, expected, \ + "Unexpected mallctl() result for opt."#opt); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_OPT(bool, abort, always); + TEST_MALLCTL_OPT(bool, abort_conf, always); + TEST_MALLCTL_OPT(bool, confirm_conf, always); + TEST_MALLCTL_OPT(const char *, metadata_thp, always); + TEST_MALLCTL_OPT(bool, retain, always); + TEST_MALLCTL_OPT(const char *, dss, always); + TEST_MALLCTL_OPT(unsigned, narenas, always); + TEST_MALLCTL_OPT(const char *, percpu_arena, always); + TEST_MALLCTL_OPT(size_t, oversize_threshold, always); + TEST_MALLCTL_OPT(bool, background_thread, always); + TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always); + TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always); + TEST_MALLCTL_OPT(bool, stats_print, always); + TEST_MALLCTL_OPT(const char *, junk, fill); + TEST_MALLCTL_OPT(bool, zero, fill); + TEST_MALLCTL_OPT(bool, utrace, utrace); + TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); + TEST_MALLCTL_OPT(bool, tcache, always); + TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, always); + TEST_MALLCTL_OPT(const char *, thp, always); + TEST_MALLCTL_OPT(bool, prof, prof); + TEST_MALLCTL_OPT(const char *, prof_prefix, prof); + TEST_MALLCTL_OPT(bool, prof_active, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); + TEST_MALLCTL_OPT(bool, prof_accum, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); + TEST_MALLCTL_OPT(bool, prof_gdump, prof); + TEST_MALLCTL_OPT(bool, prof_final, prof); + TEST_MALLCTL_OPT(bool, prof_leak, prof); + +#undef TEST_MALLCTL_OPT +} +TEST_END + +TEST_BEGIN(test_manpage_example) { + unsigned nbins, i; + size_t mib[4]; + size_t len, miblen; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, + NULL, 0), 0, "Unexpected mallctlbymib() failure"); + /* Do something with bin_size... */ + } +} +TEST_END + +TEST_BEGIN(test_tcache_none) { + test_skip_if(!opt_tcache); + + /* Allocate p and q. */ + void *p0 = mallocx(42, 0); + assert_ptr_not_null(p0, "Unexpected mallocx() failure"); + void *q = mallocx(42, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + + /* Deallocate p and q, but bypass the tcache for q. */ + dallocx(p0, 0); + dallocx(q, MALLOCX_TCACHE_NONE); + + /* Make sure that tcache-based allocation returns p, not q. */ + void *p1 = mallocx(42, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); + + /* Clean up. */ + dallocx(p1, MALLOCX_TCACHE_NONE); +} +TEST_END + +TEST_BEGIN(test_tcache) { +#define NTCACHES 10 + unsigned tis[NTCACHES]; + void *ps[NTCACHES]; + void *qs[NTCACHES]; + unsigned i; + size_t sz, psz, qsz; + + psz = 42; + qsz = nallocx(psz, 0) + 1; + + /* Create tcaches. */ + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); + } + + /* Exercise tcache ID recycling. 
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); + } + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); + } + + /* Flush empty tcaches. */ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Cache some allocations. */ + for (i = 0; i < NTCACHES; i++) { + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(ps[i], MALLOCX_TCACHE(tis[i])); + + qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Verify that tcaches allocate cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *p0 = ps[i]; + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + assert_ptr_eq(ps[i], p0, + "Expected mallocx() to allocate cached region, i=%u", i); + } + + /* Verify that reallocation uses cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *q0 = qs[i]; + qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", + i); + assert_ptr_eq(qs[i], q0, + "Expected rallocx() to allocate cached region, i=%u", i); + /* Avoid undefined behavior in case of test failure. */ + if (qs[i] == NULL) { + qs[i] = ps[i]; + } + } + for (i = 0; i < NTCACHES; i++) { + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Flush some non-empty tcaches. */ + for (i = 0; i < NTCACHES/2; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Destroy tcaches. 
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); + } +} +TEST_END + +TEST_BEGIN(test_thread_arena) { + unsigned old_arena_ind, new_arena_ind, narenas; + + const char *opa; + size_t sz = sizeof(opa); + assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + if (opt_oversize_threshold != 0) { + narenas--; + } + assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); + + if (strcmp(opa, "disabled") == 0) { + new_arena_ind = narenas - 1; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + new_arena_ind = 0; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + } else { + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; + if (old_arena_ind != new_arena_ind) { + assert_d_eq(mallctl("thread.arena", + (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, + sizeof(unsigned)), EPERM, "thread.arena ctl " + "should not be allowed with percpu arena"); + } + } +} +TEST_END + +TEST_BEGIN(test_arena_i_initialized) { + unsigned narenas, i; + size_t sz; + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + bool initialized; + + sz = sizeof(narenas); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + } + + mib[1] = MALLCTL_ARENAS_ALL; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_true(initialized, + "Merged arena statistics should always be initialized"); + + /* Equivalent to the above but using mallctl() directly. */ + sz = sizeof(initialized); + assert_d_eq(mallctl( + "arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", + (void *)&initialized, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_true(initialized, + "Merged arena statistics should always be initialized"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + dirty_decay_ms = -2; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; + + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arena.0.dirty_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + + for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; + + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arena.0.muzzy_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_purge) { + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_decay) { + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected 
mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dss) { + const char *dss_prec_old, *dss_prec_new; + size_t sz = sizeof(dss_prec_old); + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); + + mib[1] = narenas_total_get(); + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); +} +TEST_END + +TEST_BEGIN(test_arena_i_retain_grow_limit) { + size_t old_limit, new_limit, default_limit; + size_t mib[3]; + size_t miblen; + + bool retain_enabled; + size_t sz = sizeof(retain_enabled); + assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + test_skip_if(!retain_enabled); + + sz = sizeof(default_limit); + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen), + 0, "Unexpected mallctlnametomib() error"); + + assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(default_limit, SC_LARGE_MAXCLASS, + "Unexpected default for retain_grow_limit"); + + new_limit = PAGE - 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), EFAULT, "Unexpected mallctl() success"); + + new_limit = PAGE + 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(old_limit, PAGE, + "Unexpected value for retain_grow_limit"); + + /* Expect grow less than psize class 10. */ + new_limit = sz_pind2sz(10) - 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(old_limit, sz_pind2sz(9), + "Unexpected value for retain_grow_limit"); + + /* Restore to default. 
*/ + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit, + sizeof(default_limit)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arenas_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + dirty_decay_ms = -2; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); + + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; + + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arenas.dirty_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); + + for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; + + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arenas.muzzy_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_constants) { +#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); + TEST_ARENAS_CONSTANT(size_t, page, PAGE); + TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS); + TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS); + +#undef TEST_ARENAS_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_bin_constants) { +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs); + TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, + bin_infos[0].slab_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards); + +#undef TEST_ARENAS_BIN_CONSTANT +} +TEST_END + 
+TEST_BEGIN(test_arenas_lextent_constants) { +#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \ + &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, + SC_LARGE_MINCLASS); + +#undef TEST_ARENAS_LEXTENT_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_create) { + unsigned narenas_before, arena, narenas_after; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + + assert_u_eq(narenas_before+1, narenas_after, + "Unexpected number of arenas before versus after extension"); + assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); +} +TEST_END + +TEST_BEGIN(test_arenas_lookup) { + unsigned arena, arena1; + void *ptr; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); + assert_ptr_not_null(ptr, "Unexpected mallocx() failure"); + assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); + assert_u_eq(arena, arena1, "Unexpected arena index"); + dallocx(ptr, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas) { +#define TEST_STATS_ARENAS(t, name) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ +} while (0) + + TEST_STATS_ARENAS(unsigned, nthreads); + TEST_STATS_ARENAS(const char *, dss); + TEST_STATS_ARENAS(ssize_t, dirty_decay_ms); + TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms); + TEST_STATS_ARENAS(size_t, pactive); + TEST_STATS_ARENAS(size_t, pdirty); + +#undef TEST_STATS_ARENAS +} +TEST_END + +static void +alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result, + UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) { + *(bool *)extra = true; +} + +static void +dalloc_hook(void *extra, UNUSED hook_dalloc_t type, + UNUSED void *address, UNUSED uintptr_t args_raw[3]) { + *(bool *)extra = true; +} + +TEST_BEGIN(test_hooks) { + bool hook_called = false; + hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; + void *handle = NULL; + size_t sz = sizeof(handle); + int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, + sizeof(hooks)); + assert_d_eq(err, 0, "Hook installation failed"); + assert_ptr_ne(handle, NULL, "Hook installation gave null handle"); + void *ptr = mallocx(1, 0); + assert_true(hook_called, "Alloc hook not called"); + hook_called = false; + free(ptr); + assert_true(hook_called, "Free hook not called"); + + err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, + sizeof(handle)); + assert_d_eq(err, 0, "Hook removal failed"); + hook_called = false; + ptr = mallocx(1, 0); + free(ptr); + assert_false(hook_called, "Hook called after removal"); +} +TEST_END + +TEST_BEGIN(test_hooks_exhaustion) { + bool hook_called = false; + hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called}; + + void *handle; + void *handles[HOOK_MAX]; + size_t sz = sizeof(handle); + 
int err; + for (int i = 0; i < HOOK_MAX; i++) { + handle = NULL; + err = mallctl("experimental.hooks.install", &handle, &sz, + &hooks, sizeof(hooks)); + assert_d_eq(err, 0, "Error installation hooks"); + assert_ptr_ne(handle, NULL, "Got NULL handle"); + handles[i] = handle; + } + err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, + sizeof(hooks)); + assert_d_eq(err, EAGAIN, "Should have failed hook installation"); + for (int i = 0; i < HOOK_MAX; i++) { + err = mallctl("experimental.hooks.remove", NULL, NULL, + &handles[i], sizeof(handles[i])); + assert_d_eq(err, 0, "Hook removal failed"); + } + /* Insertion failed, but then we removed some; it should work now. */ + handle = NULL; + err = mallctl("experimental.hooks.install", &handle, &sz, &hooks, + sizeof(hooks)); + assert_d_eq(err, 0, "Hook insertion failed"); + assert_ptr_ne(handle, NULL, "Got NULL handle"); + err = mallctl("experimental.hooks.remove", NULL, NULL, &handle, + sizeof(handle)); + assert_d_eq(err, 0, "Hook removal failed"); +} +TEST_END + +int +main(void) { + return test( + test_mallctl_errors, + test_mallctlnametomib_errors, + test_mallctlbymib_errors, + test_mallctl_read_write, + test_mallctlnametomib_short_mib, + test_mallctl_config, + test_mallctl_opt, + test_manpage_example, + test_tcache_none, + test_tcache, + test_thread_arena, + test_arena_i_initialized, + test_arena_i_dirty_decay_ms, + test_arena_i_muzzy_decay_ms, + test_arena_i_purge, + test_arena_i_decay, + test_arena_i_dss, + test_arena_i_retain_grow_limit, + test_arenas_dirty_decay_ms, + test_arenas_muzzy_decay_ms, + test_arenas_constants, + test_arenas_bin_constants, + test_arenas_lextent_constants, + test_arenas_create, + test_arenas_lookup, + test_stats_arenas, + test_hooks, + test_hooks_exhaustion); +} diff --git a/deps/jemalloc/test/unit/malloc_io.c b/deps/jemalloc/test/unit/malloc_io.c new file mode 100644 index 0000000..79ba7fc --- /dev/null +++ b/deps/jemalloc/test/unit/malloc_io.c @@ -0,0 +1,258 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_malloc_strtoumax_no_endptr) { + int err; + + set_errno(0); + assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); + err = get_errno(); + assert_d_eq(err, 0, "Unexpected failure"); +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax) { + struct test_s { + const char *input; + const char *expected_remainder; + int base; + int expected_errno; + const char *expected_errno_name; + uintmax_t expected_x; + }; +#define ERR(e) e, #e +#define KUMAX(x) ((uintmax_t)x##ULL) +#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) + struct test_s tests[] = { + {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, + + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {"+42", "", 0, ERR(0), KUMAX(42)}, + {"-42", "", 0, ERR(0), KSMAX(-42)}, + {"042", "", 0, ERR(0), KUMAX(042)}, + {"+042", "", 0, ERR(0), KUMAX(042)}, + {"-042", "", 0, ERR(0), KSMAX(-042)}, + {"0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, + + {"0", "", 0, ERR(0), KUMAX(0)}, + {"1", "", 0, ERR(0), KUMAX(1)}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {" 42", "", 0, ERR(0), KUMAX(42)}, + {"42 ", " ", 0, ERR(0), KUMAX(42)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"42x", "x", 0, ERR(0), KUMAX(42)}, + + {"07", "", 0, ERR(0), KUMAX(7)}, + {"010", 
"", 0, ERR(0), KUMAX(8)}, + {"08", "8", 0, ERR(0), KUMAX(0)}, + {"0_", "_", 0, ERR(0), KUMAX(0)}, + + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"0X", "X", 0, ERR(0), KUMAX(0)}, + {"0xg", "xg", 0, ERR(0), KUMAX(0)}, + {"0XA", "", 0, ERR(0), KUMAX(10)}, + + {"010", "", 10, ERR(0), KUMAX(10)}, + {"0x3", "x3", 10, ERR(0), KUMAX(0)}, + + {"12", "2", 2, ERR(0), KUMAX(1)}, + {"78", "8", 8, ERR(0), KUMAX(7)}, + {"9a", "a", 10, ERR(0), KUMAX(9)}, + {"9A", "A", 10, ERR(0), KUMAX(9)}, + {"fg", "g", 16, ERR(0), KUMAX(15)}, + {"FG", "G", 16, ERR(0), KUMAX(15)}, + {"0xfg", "g", 16, ERR(0), KUMAX(15)}, + {"0XFG", "G", 16, ERR(0), KUMAX(15)}, + {"z_", "_", 36, ERR(0), KUMAX(35)}, + {"Z_", "_", 36, ERR(0), KUMAX(35)} + }; +#undef ERR +#undef KUMAX +#undef KSMAX + unsigned i; + + for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + struct test_s *test = &tests[i]; + int err; + uintmax_t result; + char *remainder; + + set_errno(0); + result = malloc_strtoumax(test->input, &remainder, test->base); + err = get_errno(); + assert_d_eq(err, test->expected_errno, + "Expected errno %s for \"%s\", base %d", + test->expected_errno_name, test->input, test->base); + assert_str_eq(remainder, test->expected_remainder, + "Unexpected remainder for \"%s\", base %d", + test->input, test->base); + if (err == 0) { + assert_ju_eq(result, test->expected_x, + "Unexpected result for \"%s\", base %d", + test->input, test->base); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf_truncated) { +#define BUFLEN 15 + char buf[BUFLEN]; + size_t result; + size_t len; +#define TEST(expected_str_untruncated, ...) do { \ + result = malloc_snprintf(buf, len, __VA_ARGS__); \ + assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ + "Unexpected string inequality (\"%s\" vs \"%s\")", \ + buf, expected_str_untruncated); \ + assert_zu_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ +} while (0) + + for (len = 1; len < BUFLEN; len++) { + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); + } +#undef BUFLEN +#undef TEST +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf) { +#define BUFLEN 128 + char buf[BUFLEN]; + size_t result; +#define TEST(expected_str, ...) 
do { \ + result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ + assert_str_eq(buf, expected_str, "Unexpected output"); \ + assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ +} while (0) + + TEST("hello", "hello"); + + TEST("50%, 100%", "50%%, %d%%", 100); + + TEST("a0123b", "a%sb", "0123"); + + TEST("a 0123b", "a%5sb", "0123"); + TEST("a 0123b", "a%*sb", 5, "0123"); + + TEST("a0123 b", "a%-5sb", "0123"); + TEST("a0123b", "a%*sb", -1, "0123"); + TEST("a0123 b", "a%*sb", -5, "0123"); + TEST("a0123 b", "a%-*sb", -5, "0123"); + + TEST("a012b", "a%.3sb", "0123"); + TEST("a012b", "a%.*sb", 3, "0123"); + TEST("a0123b", "a%.*sb", -3, "0123"); + + TEST("a 012b", "a%5.3sb", "0123"); + TEST("a 012b", "a%5.*sb", 3, "0123"); + TEST("a 012b", "a%*.3sb", 5, "0123"); + TEST("a 012b", "a%*.*sb", 5, 3, "0123"); + TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); + + TEST("_abcd_", "_%x_", 0xabcd); + TEST("_0xabcd_", "_%#x_", 0xabcd); + TEST("_1234_", "_%o_", 01234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + + TEST("_1234_", "_%d_", 1234); + TEST("_ 1234_", "_% d_", 1234); + TEST("_+1234_", "_%+d_", 1234); + TEST("_-1234_", "_%d_", -1234); + TEST("_-1234_", "_% d_", -1234); + TEST("_-1234_", "_%+d_", -1234); + + TEST("_-1234_", "_%d_", -1234); + TEST("_1234_", "_%d_", 1234); + TEST("_-1234_", "_%i_", -1234); + TEST("_1234_", "_%i_", 1234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + TEST("_0x1234abc_", "_%#x_", 0x1234abc); + TEST("_0X1234ABC_", "_%#X_", 0x1234abc); + TEST("_c_", "_%c_", 'c'); + TEST("_string_", "_%s_", "string"); + TEST("_0x42_", "_%p_", ((void *)0x42)); + + TEST("_-1234_", "_%ld_", ((long)-1234)); + TEST("_1234_", "_%ld_", ((long)1234)); + TEST("_-1234_", "_%li_", ((long)-1234)); + TEST("_1234_", "_%li_", ((long)1234)); + TEST("_01234_", "_%#lo_", ((long)01234)); + TEST("_1234_", "_%lu_", ((long)1234)); + TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); + TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); + + TEST("_-1234_", "_%lld_", ((long long)-1234)); + TEST("_1234_", "_%lld_", ((long long)1234)); + TEST("_-1234_", "_%lli_", ((long long)-1234)); + TEST("_1234_", "_%lli_", ((long long)1234)); + TEST("_01234_", "_%#llo_", ((long long)01234)); + TEST("_1234_", "_%llu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%qd_", ((long long)-1234)); + TEST("_1234_", "_%qd_", ((long long)1234)); + TEST("_-1234_", "_%qi_", ((long long)-1234)); + TEST("_1234_", "_%qi_", ((long long)1234)); + TEST("_01234_", "_%#qo_", ((long long)01234)); + TEST("_1234_", "_%qu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); + TEST("_1234_", "_%jd_", ((intmax_t)1234)); + TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); + TEST("_1234_", "_%ji_", ((intmax_t)1234)); + TEST("_01234_", "_%#jo_", ((intmax_t)01234)); + TEST("_1234_", "_%ju_", ((intmax_t)1234)); + TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); + + TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); + TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); + + TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); + TEST("_1234_", "_%zd_", ((ssize_t)1234)); + TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); + TEST("_1234_", 
"_%zi_", ((ssize_t)1234)); + TEST("_01234_", "_%#zo_", ((ssize_t)01234)); + TEST("_1234_", "_%zu_", ((ssize_t)1234)); + TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); +#undef BUFLEN +} +TEST_END + +int +main(void) { + return test( + test_malloc_strtoumax_no_endptr, + test_malloc_strtoumax, + test_malloc_snprintf_truncated, + test_malloc_snprintf); +} diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c new file mode 100644 index 0000000..09ef20c --- /dev/null +++ b/deps/jemalloc/test/unit/math.c @@ -0,0 +1,390 @@ +#include "test/jemalloc_test.h" + +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 + +#include + +#ifdef __PGI +#undef INFINITY +#endif + +#ifndef INFINITY +#define INFINITY (DBL_MAX + DBL_MAX) +#endif + +static bool +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { + double rel_err; + + if (fabs(a - b) < max_abs_err) { + return true; + } + rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); + return (rel_err < max_rel_err); +} + +static uint64_t +factorial(unsigned x) { + uint64_t ret = 1; + unsigned i; + + for (i = 2; i <= x; i++) { + ret *= (uint64_t)i; + } + + return ret; +} + +TEST_BEGIN(test_ln_gamma_factorial) { + unsigned x; + + /* exp(ln_gamma(x)) == (x-1)! for integer x. */ + for (x = 1; x <= 21; x++) { + assert_true(double_eq_rel(exp(ln_gamma(x)), + (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect factorial result for x=%u", x); + } +} +TEST_END + +/* Expected ln_gamma([0.0..100.0] increment=0.25). */ +static const double ln_gamma_misc_expected[] = { + INFINITY, + 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, + 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, + -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, + 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, + 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, + 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, + 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, + 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, + 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, + 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, + 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, + 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, + 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, + 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, + 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, + 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, + 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, + 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, + 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, + 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, + 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, + 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, + 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, + 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, + 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, + 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, + 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, + 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, + 43.09226539146988699, 
43.85192586067515208, 44.61456202863158893, + 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, + 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, + 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, + 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, + 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, + 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, + 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, + 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, + 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, + 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, + 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, + 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, + 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, + 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, + 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, + 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, + 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, + 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, + 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, + 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, + 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, + 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, + 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, + 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, + 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, + 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, + 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, + 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, + 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, + 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, + 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, + 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, + 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, + 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, + 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, + 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, + 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, + 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, + 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, + 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, + 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, + 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, + 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, + 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, + 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, + 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, + 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, + 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, + 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, + 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, + 182.49294152078630304, 
183.51285777591152737, 184.53382886144947861, + 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, + 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, + 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, + 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, + 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, + 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, + 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, + 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, + 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, + 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, + 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, + 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, + 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, + 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, + 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, + 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, + 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, + 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, + 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, + 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, + 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, + 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, + 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, + 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, + 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, + 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, + 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, + 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, + 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, + 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, + 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, + 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, + 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, + 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, + 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, + 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, + 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, + 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, + 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, + 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, + 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, + 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, + 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, + 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, + 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, + 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, + 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, + 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, + 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, + 345.37940706226686416, 
346.52211748494903532, 347.66547389743118401, + 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, + 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, + 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, + 359.13420536957539753 +}; + +TEST_BEGIN(test_ln_gamma_misc) { + unsigned i; + + for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { + double x = (double)i * 0.25; + assert_true(double_eq_rel(ln_gamma(x), + ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect ln_gamma result for i=%u", i); + } +} +TEST_END + +/* Expected pt_norm([0.01..0.99] increment=0.01). */ +static const double pt_norm_expected[] = { + -INFINITY, + -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, + -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, + -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, + -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, + -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, + -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, + -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, + -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, + -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, + -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, + -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, + -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, + -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, + -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, + -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, + -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, + -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, + 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, + 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, + 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, + 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, + 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, + 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, + 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, + 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, + 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, + 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, + 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, + 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, + 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, + 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, + 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, + 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 +}; + +TEST_BEGIN(test_pt_norm) { + unsigned i; + + for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { + double p = (double)i * 0.01; + assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], + MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_norm result for i=%u", i); + } +} +TEST_END + +/* + * Expected pt_chi2(p=[0.01..0.99] increment=0.07, + * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
+ */ +static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; +static const double pt_chi2_expected[] = { + 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, + 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, + 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, + 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, + 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, + + 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, + 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, + 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, + 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, + 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, + + 2.606673548632508, 4.602913725294877, 5.646152813924212, + 6.488971315540869, 7.249823275816285, 7.977314231410841, + 8.700354939944047, 9.441728024225892, 10.224338321374127, + 11.076435368801061, 12.039320937038386, 13.183878752697167, + 14.657791935084575, 16.885728216339373, 23.361991680031817, + + 70.14844087392152, 80.92379498849355, 85.53325420085891, + 88.94433120715347, 91.83732712857017, 94.46719943606301, + 96.96896479994635, 99.43412843510363, 101.94074719829733, + 104.57228644307247, 107.43900093448734, 110.71844673417287, + 114.76616819871325, 120.57422505959563, 135.92318818757556, + + 899.0072447849649, 937.9271278858220, 953.8117189560207, + 965.3079371501154, 974.8974061207954, 983.4936235182347, + 991.5691170518946, 999.4334123954690, 1007.3391826856553, + 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, + 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 +}; + +TEST_BEGIN(test_pt_chi2) { + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { + double df = pt_chi2_df[i]; + double ln_gamma_df = ln_gamma(df * 0.5); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), + pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_chi2 result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +/* + * Expected pt_gamma(p=[0.1..0.99] increment=0.07, + * shape=[0.5..3.0] increment=0.5). 
+ */ +static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; +static const double pt_gamma_expected[] = { + 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, + 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, + 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, + 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, + 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, + + 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, + 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, + 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, + 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, + 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, + + 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, + 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, + 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, + 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, + 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, + + 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, + 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, + 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, + 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, + 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, + + 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, + 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, + 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, + 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, + 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, + + 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, + 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, + 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, + 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, + 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 +}; + +TEST_BEGIN(test_pt_gamma_shape) { + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { + double shape = pt_gamma_shape[i]; + double ln_gamma_shape = ln_gamma(shape); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, + ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, + MAX_ABS_ERR), + "Incorrect pt_gamma result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +TEST_BEGIN(test_pt_gamma_scale) { + double shape = 1.0; + double ln_gamma_shape = ln_gamma(shape); + + assert_true(double_eq_rel( + pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, + pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, + MAX_ABS_ERR), + "Scale should be trivially equivalent to external multiplication"); +} +TEST_END + +int +main(void) { + return test( + test_ln_gamma_factorial, + test_ln_gamma_misc, + test_pt_norm, + test_pt_chi2, + test_pt_gamma_shape, + test_pt_gamma_scale); +} diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c new file mode 100644 index 0000000..57a4d54 --- /dev/null +++ b/deps/jemalloc/test/unit/mq.c @@ -0,0 +1,89 @@ +#include "test/jemalloc_test.h" + +#define NSENDERS 3 +#define NMSGS 100000 + +typedef struct mq_msg_s mq_msg_t; +struct mq_msg_s { + mq_msg(mq_msg_t) link; +}; +mq_gen(static, mq_, mq_t, mq_msg_t, link) + +TEST_BEGIN(test_mq_basic) { + mq_t mq; + mq_msg_t msg; + + assert_false(mq_init(&mq), "Unexpected 
mq_init() failure"); + assert_u_eq(mq_count(&mq), 0, "mq should be empty"); + assert_ptr_null(mq_tryget(&mq), + "mq_tryget() should fail when the queue is empty"); + + mq_put(&mq, &msg); + assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); + assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); + + mq_put(&mq, &msg); + assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); + + mq_fini(&mq); +} +TEST_END + +static void * +thd_receiver_start(void *arg) { + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < (NSENDERS * NMSGS); i++) { + mq_msg_t *msg = mq_get(mq); + assert_ptr_not_null(msg, "mq_get() should never return NULL"); + dallocx(msg, 0); + } + return NULL; +} + +static void * +thd_sender_start(void *arg) { + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < NMSGS; i++) { + mq_msg_t *msg; + void *p; + p = mallocx(sizeof(mq_msg_t), 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + msg = (mq_msg_t *)p; + mq_put(mq, msg); + } + return NULL; +} + +TEST_BEGIN(test_mq_threaded) { + mq_t mq; + thd_t receiver; + thd_t senders[NSENDERS]; + unsigned i; + + assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + + thd_create(&receiver, thd_receiver_start, (void *)&mq); + for (i = 0; i < NSENDERS; i++) { + thd_create(&senders[i], thd_sender_start, (void *)&mq); + } + + thd_join(receiver, NULL); + for (i = 0; i < NSENDERS; i++) { + thd_join(senders[i], NULL); + } + + mq_fini(&mq); +} +TEST_END + +int +main(void) { + return test( + test_mq_basic, + test_mq_threaded); +} + diff --git a/deps/jemalloc/test/unit/mtx.c b/deps/jemalloc/test/unit/mtx.c new file mode 100644 index 0000000..424587b --- /dev/null +++ b/deps/jemalloc/test/unit/mtx.c @@ -0,0 +1,57 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 2 +#define NINCRS 2000000 + +TEST_BEGIN(test_mtx_basic) { + mtx_t mtx; + + assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); + mtx_lock(&mtx); + mtx_unlock(&mtx); + mtx_fini(&mtx); +} +TEST_END + +typedef struct { + mtx_t mtx; + unsigned x; +} thd_start_arg_t; + +static void * +thd_start(void *varg) { + thd_start_arg_t *arg = (thd_start_arg_t *)varg; + unsigned i; + + for (i = 0; i < NINCRS; i++) { + mtx_lock(&arg->mtx); + arg->x++; + mtx_unlock(&arg->mtx); + } + return NULL; +} + +TEST_BEGIN(test_mtx_race) { + thd_start_arg_t arg; + thd_t thds[NTHREADS]; + unsigned i; + + assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); + arg.x = 0; + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, (void *)&arg); + } + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } + assert_u_eq(arg.x, NTHREADS * NINCRS, + "Race-related counter corruption"); +} +TEST_END + +int +main(void) { + return test( + test_mtx_basic, + test_mtx_race); +} diff --git a/deps/jemalloc/test/unit/nstime.c b/deps/jemalloc/test/unit/nstime.c new file mode 100644 index 0000000..f313780 --- /dev/null +++ b/deps/jemalloc/test/unit/nstime.c @@ -0,0 +1,249 @@ +#include "test/jemalloc_test.h" + +#define BILLION UINT64_C(1000000000) + +TEST_BEGIN(test_nstime_init) { + nstime_t nst; + + nstime_init(&nst, 42000000043); + assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); + assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_init2) { + nstime_t nst; + + nstime_init2(&nst, 42, 43); + assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec 
incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_copy) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_init(&nstb, 0); + nstime_copy(&nstb, &nsta); + assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); + assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); +} +TEST_END + +TEST_BEGIN(test_nstime_compare) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); + assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); + + nstime_init2(&nstb, 42, 42); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 42, 44); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); + + nstime_init2(&nstb, 41, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); +} +TEST_END + +TEST_BEGIN(test_nstime_add) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 84, 86); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 85, BILLION - 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_iadd) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_iadd(&nsta, 1); + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, 1); + nstime_iadd(&nsta, BILLION + 1); + nstime_init2(&nstb, 43, 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_subtract) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_subtract(&nsta, &nstb); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_init2(&nstb, 41, 44); + nstime_subtract(&nsta, &nstb); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_isubtract) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 42*BILLION + 43); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 41*BILLION + 44); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_imultiply) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_imultiply(&nsta, 10); + nstime_init2(&nstb, 420, 430); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); + + nstime_init2(&nsta, 
42, 666666666); + nstime_imultiply(&nsta, 3); + nstime_init2(&nstb, 127, 999999998); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); +} +TEST_END + +TEST_BEGIN(test_nstime_idivide) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_idivide(&nsta, 10); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 666666666); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 3); + nstime_idivide(&nsta, 3); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_divide) { + nstime_t nsta, nstb, nstc; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_init(&nstc, 1); + nstime_add(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_init(&nstc, 1); + nstime_subtract(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 9, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_monotonic) { + nstime_monotonic(); +} +TEST_END + +TEST_BEGIN(test_nstime_update) { + nstime_t nst; + + nstime_init(&nst, 0); + + assert_false(nstime_update(&nst), "Basic time update failed."); + + /* Only Rip Van Winkle sleeps this long. */ + { + nstime_t addend; + nstime_init2(&addend, 631152000, 0); + nstime_add(&nst, &addend); + } + { + nstime_t nst0; + nstime_copy(&nst0, &nst); + assert_true(nstime_update(&nst), + "Update should detect time roll-back."); + assert_d_eq(nstime_compare(&nst, &nst0), 0, + "Time should not have been modified"); + } +} +TEST_END + +int +main(void) { + return test( + test_nstime_init, + test_nstime_init2, + test_nstime_copy, + test_nstime_compare, + test_nstime_add, + test_nstime_iadd, + test_nstime_subtract, + test_nstime_isubtract, + test_nstime_imultiply, + test_nstime_idivide, + test_nstime_divide, + test_nstime_monotonic, + test_nstime_update); +} diff --git a/deps/jemalloc/test/unit/pack.c b/deps/jemalloc/test/unit/pack.c new file mode 100644 index 0000000..fc188b0 --- /dev/null +++ b/deps/jemalloc/test/unit/pack.c @@ -0,0 +1,166 @@ +#include "test/jemalloc_test.h" + +/* + * Size class that is a divisor of the page size, ideally 4+ regions per run. + */ +#if LG_PAGE <= 14 +#define SZ (ZU(1) << (LG_PAGE - 2)) +#else +#define SZ ZU(4096) +#endif + +/* + * Number of slabs to consume at high water mark. Should be at least 2 so that + * if mmap()ed memory grows downward, downward growth of mmap()ed memory is + * tested. 
+ */ +#define NSLABS 8 + +static unsigned +binind_compute(void) { + size_t sz; + unsigned nbins, i; + + sz = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + for (i = 0; i < nbins; i++) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + size_t size; + + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, + &miblen), 0, "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)i; + + sz = sizeof(size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + if (size == SZ) { + return i; + } + } + + test_fail("Unable to compute nregs_per_run"); + return 0; +} + +static size_t +nregs_per_run_compute(void) { + uint32_t nregs; + size_t sz; + unsigned binind = binind_compute(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)binind; + sz = sizeof(nregs); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + return nregs; +} + +static unsigned +arenas_create_mallctl(void) { + unsigned arena_ind; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); + + return arena_ind; +} + +static void +arena_reset_mallctl(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +TEST_BEGIN(test_pack) { + bool prof_enabled; + size_t sz = sizeof(prof_enabled); + if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) { + test_skip_if(prof_enabled); + } + + unsigned arena_ind = arenas_create_mallctl(); + size_t nregs_per_run = nregs_per_run_compute(); + size_t nregs = nregs_per_run * NSLABS; + VARIABLE_ARRAY(void *, ptrs, nregs); + size_t i, j, offset; + + /* Fill matrix. */ + for (i = offset = 0; i < NSLABS; i++) { + for (j = 0; j < nregs_per_run; j++) { + void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, + "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" + " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", + SZ, arena_ind, i, j); + ptrs[(i * nregs_per_run) + j] = p; + } + } + + /* + * Free all but one region of each run, but rotate which region is + * preserved, so that subsequent allocations exercise the within-run + * layout policy. + */ + offset = 0; + for (i = offset = 0; + i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p = ptrs[(i * nregs_per_run) + j]; + if (offset == j) { + continue; + } + dallocx(p, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + } + } + + /* + * Logically refill matrix, skipping preserved regions and verifying + * that the matrix is unmodified. + */ + offset = 0; + for (i = offset = 0; + i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p; + + if (offset == j) { + continue; + } + p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], + "Unexpected refill discrepancy, run=%zu, reg=%zu\n", + i, j); + } + } + + /* Clean up. 
*/ + arena_reset_mallctl(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_pack); +} diff --git a/deps/jemalloc/test/unit/pack.sh b/deps/jemalloc/test/unit/pack.sh new file mode 100644 index 0000000..6f45148 --- /dev/null +++ b/deps/jemalloc/test/unit/pack.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +# Immediately purge to minimize fragmentation. +export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" diff --git a/deps/jemalloc/test/unit/pages.c b/deps/jemalloc/test/unit/pages.c new file mode 100644 index 0000000..ee729ee --- /dev/null +++ b/deps/jemalloc/test/unit/pages.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_pages_huge) { + size_t alloc_size; + bool commit; + void *pages, *hugepage; + + alloc_size = HUGEPAGE * 2 - PAGE; + commit = true; + pages = pages_map(NULL, alloc_size, PAGE, &commit); + assert_ptr_not_null(pages, "Unexpected pages_map() error"); + + if (init_system_thp_mode == thp_mode_default) { + hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); + assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, + "Unexpected pages_huge() result"); + assert_false(pages_nohuge(hugepage, HUGEPAGE), + "Unexpected pages_nohuge() result"); + } + + pages_unmap(pages, alloc_size); +} +TEST_END + +int +main(void) { + return test( + test_pages_huge); +} diff --git a/deps/jemalloc/test/unit/ph.c b/deps/jemalloc/test/unit/ph.c new file mode 100644 index 0000000..88bf56f --- /dev/null +++ b/deps/jemalloc/test/unit/ph.c @@ -0,0 +1,318 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ph.h" + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + phn(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) { + int ret; + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the heap, so force an + * arbitrary ordering for non-identical items with equal keys. 
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return ret; +} + +static int +node_cmp_magic(const node_t *a, const node_t *b) { + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + return node_cmp(a, b); +} + +typedef ph(node_t) heap_t; +ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); + +static void +node_print(const node_t *node, unsigned depth) { + unsigned i; + node_t *leftmost_child, *sibling; + + for (i = 0; i < depth; i++) { + malloc_printf("\t"); + } + malloc_printf("%2"FMTu64"\n", node->key); + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) { + return; + } + node_print(leftmost_child, depth + 1); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + node_print(sibling, depth + 1); + } +} + +static void +heap_print(const heap_t *heap) { + node_t *auxelm; + + malloc_printf("vvv heap %p vvv\n", heap); + if (heap->ph_root == NULL) { + goto label_return; + } + + node_print(heap->ph_root, 0); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + node_print(auxelm, 0); + } + +label_return: + malloc_printf("^^^ heap %p ^^^\n", heap); +} + +static unsigned +node_validate(const node_t *node, const node_t *parent) { + unsigned nnodes = 1; + node_t *leftmost_child, *sibling; + + if (parent != NULL) { + assert_d_ge(node_cmp_magic(node, parent), 0, + "Child is less than parent"); + } + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) { + return nnodes; + } + assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), + (void *)node, "Leftmost child does not link to node"); + nnodes += node_validate(leftmost_child, node); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, sibling)), sibling, + "sibling's prev doesn't link to sibling"); + nnodes += node_validate(sibling, node); + } + return nnodes; +} + +static unsigned +heap_validate(const heap_t *heap) { + unsigned nnodes = 0; + node_t *auxelm; + + if (heap->ph_root == NULL) { + goto label_return; + } + + nnodes += node_validate(heap->ph_root, NULL); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + nnodes += node_validate(auxelm, NULL); + } + +label_return: + if (false) { + heap_print(heap); + } + return nnodes; +} + +TEST_BEGIN(test_ph_empty) { + heap_t heap; + + heap_new(&heap); + assert_true(heap_empty(&heap), "Heap should be empty"); + assert_ptr_null(heap_first(&heap), "Unexpected node"); + assert_ptr_null(heap_any(&heap), "Unexpected node"); +} +TEST_END + +static void +node_remove(heap_t *heap, node_t *node) { + heap_remove(heap, node); + + node->magic = 0; +} + +static node_t * +node_remove_first(heap_t *heap) { + node_t *node = heap_remove_first(heap); + node->magic = 0; + return node; +} + +static node_t * +node_remove_any(heap_t *heap) { + node_t *node = heap_remove_any(heap); + node->magic = 0; + return 
node; +} + +TEST_BEGIN(test_ph_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + heap_t heap; + node_t nodes[NNODES]; + unsigned i, j, k; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = j; + } + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = NNODES - j - 1; + } + break; + default: + for (j = 0; j < NNODES; j++) { + bag[j] = gen_rand64_range(sfmt, NNODES); + } + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize heap and nodes. */ + heap_new(&heap); + assert_u_eq(heap_validate(&heap), 0, + "Incorrect node count"); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + heap_insert(&heap, &nodes[k]); + if (i % 13 == 12) { + assert_ptr_not_null(heap_any(&heap), + "Heap should not be empty"); + /* Trigger merging. */ + assert_ptr_not_null(heap_first(&heap), + "Heap should not be empty"); + } + assert_u_eq(heap_validate(&heap), k + 1, + "Incorrect node count"); + } + + assert_false(heap_empty(&heap), + "Heap should not be empty"); + + /* Remove nodes. */ + switch (i % 6) { + case 0: + for (k = 0; k < j; k++) { + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, &nodes[k]); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + case 1: + for (k = j; k > 0; k--) { + node_remove(&heap, &nodes[k-1]); + assert_u_eq(heap_validate(&heap), k - 1, + "Incorrect node count"); + } + break; + case 2: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = node_remove_first(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + prev = node; + } + break; + } case 3: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = heap_first(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + prev = node; + } + break; + } case 4: { + for (k = 0; k < j; k++) { + node_remove_any(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + } case 5: { + for (k = 0; k < j; k++) { + node_t *node = heap_any(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + } default: + not_reached(); + } + + assert_ptr_null(heap_first(&heap), + "Heap should be empty"); + assert_ptr_null(heap_any(&heap), + "Heap should be empty"); + assert_true(heap_empty(&heap), "Heap should be empty"); + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef SEED +} +TEST_END + +int +main(void) { + return test( + test_ph_empty, + test_ph_random); +} diff --git a/deps/jemalloc/test/unit/prng.c b/deps/jemalloc/test/unit/prng.c new file mode 100644 index 0000000..b5795c2 --- /dev/null +++ b/deps/jemalloc/test/unit/prng.c @@ -0,0 +1,237 @@ +#include "test/jemalloc_test.h" + +static void +test_prng_lg_range_u32(bool atomic) { + atomic_u32_t sa, sb; + uint32_t ra, rb; + unsigned lg_range; + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = 
prng_lg_range_u32(&sa, 32, atomic); + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_eq(ra, rb, + "Repeated generation should produce repeated results"); + + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sb, 32, atomic); + assert_u32_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_u32(&sa, 32, atomic); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_ne(ra, rb, + "Full-width results must not immediately repeat"); + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_u32(&sa, 32, atomic); + for (lg_range = 31; lg_range > 0; lg_range--) { + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sb, lg_range, atomic); + assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u32_eq(rb, (ra >> (32 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_u64(void) { + uint64_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + sa = 42; + rb = prng_lg_range_u64(&sa, 64); + assert_u64_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_u64(&sb, 64); + assert_u64_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + rb = prng_lg_range_u64(&sa, 64); + assert_u64_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + for (lg_range = 63; lg_range > 0; lg_range--) { + sb = 42; + rb = prng_lg_range_u64(&sb, lg_range); + assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u64_eq(rb, (ra >> (64 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_zu(bool atomic) { + atomic_zu_t sa, sb; + size_t ra, rb; + unsigned lg_range; + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Repeated generation should produce repeated results"); + + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_ne(ra, rb, + "Full-width results must not immediately repeat"); + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; + lg_range--) { + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sb, lg_range, atomic); + assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - + lg_range)), "Expected high order bits of full-width " + "result, lg_range=%u", lg_range); + } +} + +TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { + 
test_prng_lg_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u32_atomic) { + test_prng_lg_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { + test_prng_lg_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { + test_prng_lg_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_atomic) { + test_prng_lg_range_zu(true); +} +TEST_END + +static void +test_prng_range_u32(bool atomic) { + uint32_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + atomic_u32_t s; + unsigned rep; + + atomic_store_u32(&s, range, ATOMIC_RELAXED); + for (rep = 0; rep < NREPS; rep++) { + uint32_t r = prng_range_u32(&s, range, atomic); + + assert_u32_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_u64(void) { + uint64_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + uint64_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + uint64_t r = prng_range_u64(&s, range); + + assert_u64_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_zu(bool atomic) { + size_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + atomic_zu_t s; + unsigned rep; + + atomic_store_zu(&s, range, ATOMIC_RELAXED); + for (rep = 0; rep < NREPS; rep++) { + size_t r = prng_range_zu(&s, range, atomic); + + assert_zu_lt(r, range, "Out of range"); + } + } +} + +TEST_BEGIN(test_prng_range_u32_nonatomic) { + test_prng_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_u32_atomic) { + test_prng_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_range_u64_nonatomic) { + test_prng_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_nonatomic) { + test_prng_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_atomic) { + test_prng_range_zu(true); +} +TEST_END + +int +main(void) { + return test( + test_prng_lg_range_u32_nonatomic, + test_prng_lg_range_u32_atomic, + test_prng_lg_range_u64_nonatomic, + test_prng_lg_range_zu_nonatomic, + test_prng_lg_range_zu_atomic, + test_prng_range_u32_nonatomic, + test_prng_range_u32_atomic, + test_prng_range_u64_nonatomic, + test_prng_range_zu_nonatomic, + test_prng_range_zu_atomic); +} diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c new file mode 100644 index 0000000..2522006 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_accum.c @@ -0,0 +1,81 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) { + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return fd; +} + +static void * +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { + return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); +} + +static void * +thd_start(void *varg) { + unsigned thd_ind = *(unsigned *)varg; + size_t bt_count_prev, bt_count; + unsigned i_prev, i; + + i_prev = 0; + bt_count_prev = 0; + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + void *p = alloc_from_permuted_backtrace(thd_ind, i); + dallocx(p, 0); + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while 
dumping heap profile"); + } + + if (i % BT_COUNT_CHECK_INTERVAL == 0 || + i+1 == NALLOCS_PER_THREAD) { + bt_count = prof_bt_count(); + assert_zu_le(bt_count_prev+(i-i_prev), bt_count, + "Expected larger backtrace count increase"); + i_prev = i; + bt_count_prev = bt_count; + } + } + + return NULL; +} + +TEST_BEGIN(test_idump) { + bool active; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_idump); +} diff --git a/deps/jemalloc/test/unit/prof_accum.sh b/deps/jemalloc/test/unit/prof_accum.sh new file mode 100644 index 0000000..b3e13fc --- /dev/null +++ b/deps/jemalloc/test/unit/prof_accum.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c new file mode 100644 index 0000000..850a24a --- /dev/null +++ b/deps/jemalloc/test/unit/prof_active.c @@ -0,0 +1,117 @@ +#include "test/jemalloc_test.h" + +static void +mallctl_bool_get(const char *name, bool expected, const char *func, int line) { + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading %s", func, line, name); + assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, + name); +} + +static void +mallctl_bool_set(const char *name, bool old_expected, bool val_new, + const char *func, int line) { + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, + sizeof(val_new)), 0, + "%s():%d: Unexpected mallctl failure reading/writing %s", func, + line, name); + assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, + line, name); +} + +static void +mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, + int line) { + mallctl_bool_get("prof.active", prof_active_old_expected, func, line); +} +#define mallctl_prof_active_get(a) \ + mallctl_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_prof_active_set_impl(bool prof_active_old_expected, + bool prof_active_new, const char *func, int line) { + mallctl_bool_set("prof.active", prof_active_old_expected, + prof_active_new, func, line); +} +#define mallctl_prof_active_set(a, b) \ + mallctl_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, + const char *func, int line) { + mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, + func, line); +} +#define mallctl_thread_prof_active_get(a) \ + mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, + bool thread_prof_active_new, const char *func, int line) { + mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, + thread_prof_active_new, func, line); +} +#define mallctl_thread_prof_active_set(a, b) \ + 
mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { + void *p; + size_t expected_backtraces = expect_sample ? 1 : 0; + + assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, + line); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), expected_backtraces, + "%s():%d: Unexpected backtrace count", func, line); + dallocx(p, 0); +} +#define prof_sampling_probe(a) \ + prof_sampling_probe_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_active) { + test_skip_if(!config_prof); + + mallctl_prof_active_get(true); + mallctl_thread_prof_active_get(false); + + mallctl_prof_active_set(true, true); + mallctl_thread_prof_active_set(false, false); + /* prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(true, false); + mallctl_thread_prof_active_set(false, false); + /* !prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, false); + mallctl_thread_prof_active_set(false, true); + /* !prof.active, thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, true); + mallctl_thread_prof_active_set(true, true); + /* prof.active, thread.prof.active. */ + prof_sampling_probe(true); + + /* Restore settings. */ + mallctl_prof_active_set(true, true); + mallctl_thread_prof_active_set(true, false); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_prof_active); +} diff --git a/deps/jemalloc/test/unit/prof_active.sh b/deps/jemalloc/test/unit/prof_active.sh new file mode 100644 index 0000000..0167cb1 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_active.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c new file mode 100644 index 0000000..f7e0aac --- /dev/null +++ b/deps/jemalloc/test/unit/prof_gdump.c @@ -0,0 +1,74 @@ +#include "test/jemalloc_test.h" + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) { + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return fd; +} + +TEST_BEGIN(test_gdump) { + bool active, gdump, gdump_old; + void *p, *q, *r, *s; + size_t sz; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + did_prof_dump_open = false; + q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + gdump = false; + sz = sizeof(gdump_old); + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, + "Unexpected mallctl failure while disabling prof.gdump"); + assert(gdump_old); + did_prof_dump_open = false; + r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); + assert_ptr_not_null(r, "Unexpected mallocx() failure"); + 
assert_false(did_prof_dump_open, "Unexpected profile dump"); + + gdump = true; + sz = sizeof(gdump_old); + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, + "Unexpected mallctl failure while enabling prof.gdump"); + assert(!gdump_old); + did_prof_dump_open = false; + s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0); + assert_ptr_not_null(s, "Unexpected mallocx() failure"); + assert_true(did_prof_dump_open, "Expected a profile dump"); + + dallocx(p, 0); + dallocx(q, 0); + dallocx(r, 0); + dallocx(s, 0); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_gdump); +} diff --git a/deps/jemalloc/test/unit/prof_gdump.sh b/deps/jemalloc/test/unit/prof_gdump.sh new file mode 100644 index 0000000..3f600d2 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_gdump.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true" +fi + diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c new file mode 100644 index 0000000..1cc6c98 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_idump.c @@ -0,0 +1,42 @@ +#include "test/jemalloc_test.h" + +static bool did_prof_dump_open; + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) { + int fd; + + did_prof_dump_open = true; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return fd; +} + +TEST_BEGIN(test_idump) { + bool active; + void *p; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + did_prof_dump_open = false; + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); + assert_true(did_prof_dump_open, "Expected a profile dump"); +} +TEST_END + +int +main(void) { + return test( + test_idump); +} diff --git a/deps/jemalloc/test/unit/prof_idump.sh b/deps/jemalloc/test/unit/prof_idump.sh new file mode 100644 index 0000000..4dc599a --- /dev/null +++ b/deps/jemalloc/test/unit/prof_idump.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +export MALLOC_CONF="tcache:false" +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" +fi + + diff --git a/deps/jemalloc/test/unit/prof_log.c b/deps/jemalloc/test/unit/prof_log.c new file mode 100644 index 0000000..92fbd7c --- /dev/null +++ b/deps/jemalloc/test/unit/prof_log.c @@ -0,0 +1,148 @@ +#include "test/jemalloc_test.h" + +#define N_PARAM 100 +#define N_THREADS 10 + +static void assert_rep() { + assert_b_eq(prof_log_rep_check(), false, "Rep check failed"); +} + +static void assert_log_empty() { + assert_zu_eq(prof_log_bt_count(), 0, + "The log has backtraces; it isn't empty"); + assert_zu_eq(prof_log_thr_count(), 0, + "The log has threads; it isn't empty"); + assert_zu_eq(prof_log_alloc_count(), 0, + "The log has allocations; it isn't empty"); +} + +void *buf[N_PARAM]; + +static void f() { + int i; + for (i = 0; i < N_PARAM; i++) { + buf[i] = malloc(100); + } + for (i = 0; i < N_PARAM; i++) { + free(buf[i]); + } +} + +TEST_BEGIN(test_prof_log_many_logs) { + int i; + + test_skip_if(!config_prof); + + for (i = 0; i < N_PARAM; i++) { + assert_b_eq(prof_log_is_logging(), false, + "Logging shouldn't have started yet"); + assert_d_eq(mallctl("prof.log_start", NULL, NULL, 
NULL, 0), 0, + "Unexpected mallctl failure when starting logging"); + assert_b_eq(prof_log_is_logging(), true, + "Logging should be started by now"); + assert_log_empty(); + assert_rep(); + f(); + assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count"); + assert_rep(); + assert_b_eq(prof_log_is_logging(), true, + "Logging should still be on"); + assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure when stopping logging"); + assert_b_eq(prof_log_is_logging(), false, + "Logging should have turned off"); + } +} +TEST_END + +thd_t thr_buf[N_THREADS]; + +static void *f_thread(void *unused) { + int i; + for (i = 0; i < N_PARAM; i++) { + void *p = malloc(100); + memset(p, 100, sizeof(char)); + free(p); + } + + return NULL; +} + +TEST_BEGIN(test_prof_log_many_threads) { + + test_skip_if(!config_prof); + + int i; + assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure when starting logging"); + for (i = 0; i < N_THREADS; i++) { + thd_create(&thr_buf[i], &f_thread, NULL); + } + + for (i = 0; i < N_THREADS; i++) { + thd_join(thr_buf[i], NULL); + } + assert_zu_eq(prof_log_thr_count(), N_THREADS, + "Wrong number of thread entries"); + assert_rep(); + assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure when stopping logging"); +} +TEST_END + +static void f3() { + void *p = malloc(100); + free(p); +} + +static void f1() { + void *p = malloc(100); + f3(); + free(p); +} + +static void f2() { + void *p = malloc(100); + free(p); +} + +TEST_BEGIN(test_prof_log_many_traces) { + + test_skip_if(!config_prof); + + assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure when starting logging"); + int i; + assert_rep(); + assert_log_empty(); + for (i = 0; i < N_PARAM; i++) { + assert_rep(); + f1(); + assert_rep(); + f2(); + assert_rep(); + f3(); + assert_rep(); + } + /* + * There should be 8 total backtraces: two for malloc/free in f1(), two + * for malloc/free in f2(), two for malloc/free in f3(), and then two + * for malloc/free in f1()'s call to f3(). However compiler + * optimizations such as loop unrolling might generate more call sites. + * So >= 8 traces are expected. 
+ */ + assert_zu_ge(prof_log_bt_count(), 8, + "Expect at least 8 backtraces given sample workload"); + assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure when stopping logging"); +} +TEST_END + +int +main(void) { + prof_log_dummy_set(true); + return test_no_reentrancy( + test_prof_log_many_logs, + test_prof_log_many_traces, + test_prof_log_many_threads); +} diff --git a/deps/jemalloc/test/unit/prof_log.sh b/deps/jemalloc/test/unit/prof_log.sh new file mode 100644 index 0000000..8fcc7d8 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_log.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c new file mode 100644 index 0000000..7cce42d --- /dev/null +++ b/deps/jemalloc/test/unit/prof_reset.c @@ -0,0 +1,286 @@ +#include "test/jemalloc_test.h" + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) { + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return fd; +} + +static void +set_prof_active(bool active) { + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, "Unexpected mallctl failure"); +} + +static size_t +get_lg_prof_sample(void) { + size_t lg_prof_sample; + size_t sz = sizeof(size_t); + + assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, + NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + return lg_prof_sample; +} + +static void +do_prof_reset(size_t lg_prof_sample) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, + (void *)&lg_prof_sample, sizeof(size_t)), 0, + "Unexpected mallctl failure while resetting profile data"); + assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), + "Expected profile sample rate change"); +} + +TEST_BEGIN(test_prof_reset_basic) { + size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; + size_t sz; + unsigned i; + + test_skip_if(!config_prof); + + sz = sizeof(size_t); + assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, + &sz, NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + assert_zu_eq(lg_prof_sample_orig, 0, + "Unexpected profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); + + /* Test simple resets. */ + for (i = 0; i < 2; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure while resetting profile data"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected profile sample rate change"); + } + + /* Test resets with prof.lg_sample changes. */ + lg_prof_sample_next = 1; + for (i = 0; i < 2; i++) { + do_prof_reset(lg_prof_sample_next); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample, lg_prof_sample_next, + "Expected profile sample rate change"); + lg_prof_sample_next = lg_prof_sample_orig; + } + + /* Make sure the test code restored prof.lg_sample. 
*/ + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); +} +TEST_END + +bool prof_dump_header_intercepted = false; +prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; +static bool +prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + prof_dump_header_intercepted = true; + memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); + + return false; +} + +TEST_BEGIN(test_prof_reset_cleanup) { + void *p; + prof_dump_header_t *prof_dump_header_orig; + + test_skip_if(!config_prof); + + set_prof_active(true); + + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header_orig = prof_dump_header; + prof_dump_header = prof_dump_header_intercept; + assert_false(prof_dump_header_intercepted, "Unexpected intercept"); + + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_true(prof_dump_header_intercepted, "Expected intercept"); + assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); + + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header = prof_dump_header_orig; + + dallocx(p, 0); + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + + set_prof_active(false); +} +TEST_END + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 +static void * +thd_start(void *varg) { + unsigned thd_ind = *(unsigned *)varg; + unsigned i; + void *objs[OBJ_RING_BUF_COUNT]; + + memset(objs, 0, sizeof(objs)); + + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + if (i % RESET_INTERVAL == 0) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), + 0, "Unexpected error while resetting heap profile " + "data"); + } + + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + { + void **pp = &objs[i % OBJ_RING_BUF_COUNT]; + if (*pp != NULL) { + dallocx(*pp, 0); + *pp = NULL; + } + *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); + assert_ptr_not_null(*pp, + "Unexpected btalloc() failure"); + } + } + + /* Clean up any remaining objects. 
*/ + for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { + void **pp = &objs[i % OBJ_RING_BUF_COUNT]; + if (*pp != NULL) { + dallocx(*pp, 0); + *pp = NULL; + } + } + + return NULL; +} + +TEST_BEGIN(test_prof_reset) { + size_t lg_prof_sample_orig; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + size_t bt_count, tdata_count; + + test_skip_if(!config_prof); + + bt_count = prof_bt_count(); + assert_zu_eq(bt_count, 0, + "Unexpected pre-existing tdata structures"); + tdata_count = prof_tdata_count(); + + lg_prof_sample_orig = get_lg_prof_sample(); + do_prof_reset(5); + + set_prof_active(true); + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } + + assert_zu_eq(prof_bt_count(), bt_count, + "Unexpected bactrace count change"); + assert_zu_eq(prof_tdata_count(), tdata_count, + "Unexpected remaining tdata structures"); + + set_prof_active(false); + + do_prof_reset(lg_prof_sample_orig); +} +TEST_END +#undef NTHREADS +#undef NALLOCS_PER_THREAD +#undef OBJ_RING_BUF_COUNT +#undef RESET_INTERVAL +#undef DUMP_INTERVAL + +/* Test sampling at the same allocation site across resets. */ +#define NITER 10 +TEST_BEGIN(test_xallocx) { + size_t lg_prof_sample_orig; + unsigned i; + void *ptrs[NITER]; + + test_skip_if(!config_prof); + + lg_prof_sample_orig = get_lg_prof_sample(); + set_prof_active(true); + + /* Reset profiling. */ + do_prof_reset(0); + + for (i = 0; i < NITER; i++) { + void *p; + size_t sz, nsz; + + /* Reset profiling. */ + do_prof_reset(0); + + /* Allocate small object (which will be promoted). */ + p = ptrs[i] = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + /* Reset profiling. */ + do_prof_reset(0); + + /* Perform successful xallocx(). */ + sz = sallocx(p, 0); + assert_zu_eq(xallocx(p, sz, 0, 0), sz, + "Unexpected xallocx() failure"); + + /* Perform unsuccessful xallocx(). */ + nsz = nallocx(sz+1, 0); + assert_zu_eq(xallocx(p, nsz, 0, 0), sz, + "Unexpected xallocx() success"); + } + + for (i = 0; i < NITER; i++) { + /* dallocx. */ + dallocx(ptrs[i], 0); + } + + set_prof_active(false); + do_prof_reset(lg_prof_sample_orig); +} +TEST_END +#undef NITER + +int +main(void) { + /* Intercept dumping prior to running any tests. 
*/ + prof_dump_open = prof_dump_open_intercept; + + return test_no_reentrancy( + test_prof_reset_basic, + test_prof_reset_cleanup, + test_prof_reset, + test_xallocx); +} diff --git a/deps/jemalloc/test/unit/prof_reset.sh b/deps/jemalloc/test/unit/prof_reset.sh new file mode 100644 index 0000000..43c516a --- /dev/null +++ b/deps/jemalloc/test/unit/prof_reset.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/prof_tctx.c b/deps/jemalloc/test/unit/prof_tctx.c new file mode 100644 index 0000000..ff3b2b0 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_tctx.c @@ -0,0 +1,46 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_prof_realloc) { + tsdn_t *tsdn; + int flags; + void *p, *q; + prof_tctx_t *tctx_p, *tctx_q; + uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; + + test_skip_if(!config_prof); + + tsdn = tsdn_fetch(); + flags = MALLOCX_TCACHE_NONE; + + prof_cnt_all(&curobjs_0, NULL, NULL, NULL); + p = mallocx(1024, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tctx_p = prof_tctx_get(tsdn, p, NULL); + assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_1, NULL, NULL, NULL); + assert_u64_eq(curobjs_0 + 1, curobjs_1, + "Allocation should have increased sample size"); + + q = rallocx(p, 2048, flags); + assert_ptr_ne(p, q, "Expected move"); + assert_ptr_not_null(q, "Unexpected rallocx() failure"); + tctx_q = prof_tctx_get(tsdn, q, NULL); + assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_2, NULL, NULL, NULL); + assert_u64_eq(curobjs_1, curobjs_2, + "Reallocation should not have changed sample size"); + + dallocx(q, flags); + prof_cnt_all(&curobjs_3, NULL, NULL, NULL); + assert_u64_eq(curobjs_0, curobjs_3, + "Sample size should have returned to base level"); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_prof_realloc); +} diff --git a/deps/jemalloc/test/unit/prof_tctx.sh b/deps/jemalloc/test/unit/prof_tctx.sh new file mode 100644 index 0000000..8fcc7d8 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_tctx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c new file mode 100644 index 0000000..c9c2a2b --- /dev/null +++ b/deps/jemalloc/test/unit/prof_thread_name.c @@ -0,0 +1,120 @@ +#include "test/jemalloc_test.h" + +static void +mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, + int line) { + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, + NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading thread.prof.name", + func, line); + assert_str_eq(thread_name_old, thread_name_expected, + "%s():%d: Unexpected thread.prof.name value", func, line); +} +#define mallctl_thread_name_get(a) \ + mallctl_thread_name_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_name_set_impl(const char *thread_name, const char *func, + int line) { + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), 0, + "%s():%d: Unexpected mallctl failure reading thread.prof.name", + func, line); + mallctl_thread_name_get_impl(thread_name, func, line); +} +#define mallctl_thread_name_set(a) \ 
+ mallctl_thread_name_set_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_thread_name_validation) { + const char *thread_name; + + test_skip_if(!config_prof); + + mallctl_thread_name_get(""); + mallctl_thread_name_set("hi there"); + + /* NULL input shouldn't be allowed. */ + thread_name = NULL; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* '\n' shouldn't be allowed. */ + thread_name = "hi\nthere"; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* Simultaneous read/write shouldn't be allowed. */ + { + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", + (void *)&thread_name_old, &sz, (void *)&thread_name, + sizeof(thread_name)), EPERM, + "Unexpected mallctl result writing \"%s\" to " + "thread.prof.name", thread_name); + } + + mallctl_thread_name_set(""); +} +TEST_END + +#define NTHREADS 4 +#define NRESET 25 +static void * +thd_start(void *varg) { + unsigned thd_ind = *(unsigned *)varg; + char thread_name[16] = ""; + unsigned i; + + malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); + + mallctl_thread_name_get(""); + mallctl_thread_name_set(thread_name); + + for (i = 0; i < NRESET; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + mallctl_thread_name_get(thread_name); + } + + mallctl_thread_name_set(thread_name); + mallctl_thread_name_set(""); + + return NULL; +} + +TEST_BEGIN(test_prof_thread_name_threaded) { + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) { + thd_join(thds[i], NULL); + } +} +TEST_END +#undef NTHREADS +#undef NRESET + +int +main(void) { + return test( + test_prof_thread_name_validation, + test_prof_thread_name_threaded); +} diff --git a/deps/jemalloc/test/unit/prof_thread_name.sh b/deps/jemalloc/test/unit/prof_thread_name.sh new file mode 100644 index 0000000..298c105 --- /dev/null +++ b/deps/jemalloc/test/unit/prof_thread_name.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false" +fi diff --git a/deps/jemalloc/test/unit/ql.c b/deps/jemalloc/test/unit/ql.c new file mode 100644 index 0000000..b76c24c --- /dev/null +++ b/deps/jemalloc/test/unit/ql.c @@ -0,0 +1,204 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ql.h" + +/* Number of ring entries, in [2..26]. 
*/ +#define NENTRIES 9 + +typedef struct list_s list_t; +typedef ql_head(list_t) list_head_t; + +struct list_s { + ql_elm(list_t) link; + char id; +}; + +static void +test_empty_list(list_head_t *head) { + list_t *t; + unsigned i; + + assert_ptr_null(ql_first(head), "Unexpected element for empty list"); + assert_ptr_null(ql_last(head, link), + "Unexpected element for empty list"); + + i = 0; + ql_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); + + i = 0; + ql_reverse_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); +} + +TEST_BEGIN(test_ql_empty) { + list_head_t head; + + ql_new(&head); + test_empty_list(&head); +} +TEST_END + +static void +init_entries(list_t *entries, unsigned nentries) { + unsigned i; + + for (i = 0; i < nentries; i++) { + entries[i].id = 'a' + i; + ql_elm_new(&entries[i], link); + } +} + +static void +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { + list_t *t; + unsigned i; + + assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); + assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, + "Element id mismatch"); + + i = 0; + ql_foreach(t, head, link) { + assert_c_eq(t->id, entries[i].id, "Element id mismatch"); + i++; + } + + i = 0; + ql_reverse_foreach(t, head, link) { + assert_c_eq(t->id, entries[nentries-i-1].id, + "Element id mismatch"); + i++; + } + + for (i = 0; i < nentries-1; i++) { + t = ql_next(head, &entries[i], link); + assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); + } + assert_ptr_null(ql_next(head, &entries[nentries-1], link), + "Unexpected element"); + + assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); + for (i = 1; i < nentries; i++) { + t = ql_prev(head, &entries[i], link); + assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); + } +} + +TEST_BEGIN(test_ql_tail_insert) { + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) { + ql_tail_insert(&head, &entries[i], link); + } + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_tail_remove) { + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) { + ql_tail_insert(&head, &entries[i], link); + } + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, entries, NENTRIES-i); + ql_tail_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_head_insert) { + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) { + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_head_remove) { + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) { + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, &entries[i], NENTRIES-i); + ql_head_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_insert) { + list_head_t head; + list_t entries[8]; + list_t *a, *b, *c, *d, *e, *f, *g, *h; + + ql_new(&head); + 
init_entries(entries, sizeof(entries)/sizeof(list_t)); + a = &entries[0]; + b = &entries[1]; + c = &entries[2]; + d = &entries[3]; + e = &entries[4]; + f = &entries[5]; + g = &entries[6]; + h = &entries[7]; + + /* + * ql_remove(), ql_before_insert(), and ql_after_insert() are used + * internally by other macros that are already tested, so there's no + * need to test them completely. However, insertion/deletion from the + * middle of lists is not otherwise tested; do so here. + */ + ql_tail_insert(&head, f, link); + ql_before_insert(&head, f, b, link); + ql_before_insert(&head, f, c, link); + ql_after_insert(f, h, link); + ql_after_insert(f, g, link); + ql_before_insert(&head, b, a, link); + ql_after_insert(c, d, link); + ql_before_insert(&head, f, e, link); + + test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); +} +TEST_END + +int +main(void) { + return test( + test_ql_empty, + test_ql_tail_insert, + test_ql_tail_remove, + test_ql_head_insert, + test_ql_head_remove, + test_ql_insert); +} diff --git a/deps/jemalloc/test/unit/qr.c b/deps/jemalloc/test/unit/qr.c new file mode 100644 index 0000000..271a109 --- /dev/null +++ b/deps/jemalloc/test/unit/qr.c @@ -0,0 +1,243 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/qr.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 +/* Split index, in [1..NENTRIES). */ +#define SPLIT_INDEX 5 + +typedef struct ring_s ring_t; + +struct ring_s { + qr(ring_t) link; + char id; +}; + +static void +init_entries(ring_t *entries) { + unsigned i; + + for (i = 0; i < NENTRIES; i++) { + qr_new(&entries[i], link); + entries[i].id = 'a' + i; + } +} + +static void +test_independent_entries(ring_t *entries) { + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Next element in single-element ring should be same as " + "current element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Previous element in single-element ring should be same as " + "current element"); + } +} + +TEST_BEGIN(test_qr_one) { + ring_t entries[NENTRIES]; + + init_entries(entries); + test_independent_entries(entries); +} +TEST_END + +static void +test_entries_ring(ring_t *entries) { + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } +} + +TEST_BEGIN(test_qr_after_insert) { + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; 
i++) { + qr_after_insert(&entries[i - 1], &entries[i], link); + } + test_entries_ring(entries); +} +TEST_END + +TEST_BEGIN(test_qr_remove) { + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) { + qr_after_insert(&entries[i - 1], &entries[i], link); + } + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[i+j].id, + "Element id mismatch"); + j++; + } + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, + "Element id mismatch"); + j++; + } + qr_remove(&entries[i], link); + } + test_independent_entries(entries); +} +TEST_END + +TEST_BEGIN(test_qr_before_insert) { + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) { + qr_before_insert(&entries[i - 1], &entries[i], link); + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } +} +TEST_END + +static void +test_split_entries(ring_t *entries) { + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + if (i < SPLIT_INDEX) { + assert_c_eq(t->id, + entries[(i+j) % SPLIT_INDEX].id, + "Element id mismatch"); + } else { + assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % + (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, + "Element id mismatch"); + } + j++; + } + } +} + +TEST_BEGIN(test_qr_meld_split) { + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) { + qr_after_insert(&entries[i - 1], &entries[i], link); + } + + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); + test_split_entries(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); + test_split_entries(entries); + + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); + test_entries_ring(entries); + + qr_split(&entries[0], &entries[0], ring_t, link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[0], ring_t, link); + test_entries_ring(entries); +} +TEST_END + +int +main(void) { + return test( + test_qr_one, + test_qr_after_insert, + test_qr_remove, + test_qr_before_insert, + test_qr_meld_split); +} diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c new file mode 100644 index 0000000..65c0492 --- /dev/null +++ b/deps/jemalloc/test/unit/rb.c @@ -0,0 +1,355 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/rb.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ + NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ + rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ + } \ +} while (0) + +typedef struct node_s node_t; + 
+struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + rb_node(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) { + int ret; + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the tree, so force an + * arbitrary ordering for non-identical items with equal keys. + */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return ret; +} + +typedef rb_tree(node_t) tree_t; +rb_gen(static, tree_, tree_t, node_t, link, node_cmp); + +TEST_BEGIN(test_rb_empty) { + tree_t tree; + node_t key; + + tree_new(&tree); + + assert_true(tree_empty(&tree), "Tree should be empty"); + assert_ptr_null(tree_first(&tree), "Unexpected node"); + assert_ptr_null(tree_last(&tree), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); +} +TEST_END + +static unsigned +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { + unsigned ret = 0; + node_t *left_node; + node_t *right_node; + + if (node == NULL) { + return ret; + } + + left_node = rbtn_left_get(node_t, link, node); + right_node = rbtn_right_get(node_t, link, node); + + if (!rbtn_red_get(node_t, link, node)) { + black_depth++; + } + + /* Red nodes must be interleaved with black nodes. */ + if (rbtn_red_get(node_t, link, node)) { + if (left_node != NULL) { + assert_false(rbtn_red_get(node_t, link, left_node), + "Node should be black"); + } + if (right_node != NULL) { + assert_false(rbtn_red_get(node_t, link, right_node), + "Node should be black"); + } + } + + /* Self. */ + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Left subtree. */ + if (left_node != NULL) { + ret += tree_recurse(left_node, black_height, black_depth); + } else { + ret += (black_depth != black_height); + } + + /* Right subtree. */ + if (right_node != NULL) { + ret += tree_recurse(right_node, black_height, black_depth); + } else { + ret += (black_depth != black_height); + } + + return ret; +} + +static node_t * +tree_iterate_cb(tree_t *tree, node_t *node, void *data) { + unsigned *i = (unsigned *)data; + node_t *search_node; + + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Test rb_search(). */ + search_node = tree_search(tree, node); + assert_ptr_eq(search_node, node, + "tree_search() returned unexpected node"); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_nsearch() returned unexpected node"); + + /* Test rb_psearch(). 
*/ + search_node = tree_psearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_psearch() returned unexpected node"); + + (*i)++; + + return NULL; +} + +static unsigned +tree_iterate(tree_t *tree) { + unsigned i; + + i = 0; + tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return i; +} + +static unsigned +tree_iterate_reverse(tree_t *tree) { + unsigned i; + + i = 0; + tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return i; +} + +static void +node_remove(tree_t *tree, node_t *node, unsigned nnodes) { + node_t *search_node; + unsigned black_height, imbalances; + + tree_remove(tree, node); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + if (search_node != NULL) { + assert_u64_ge(search_node->key, node->key, + "Key ordering error"); + } + + /* Test rb_psearch(). */ + search_node = tree_psearch(tree, node); + if (search_node != NULL) { + assert_u64_le(search_node->key, node->key, + "Key ordering error"); + } + + node->magic = 0; + + rbtn_black_height(node_t, link, tree, black_height); + imbalances = tree_recurse(tree->rbt_root, black_height, 0); + assert_u_eq(imbalances, 0, "Tree is unbalanced"); + assert_u_eq(tree_iterate(tree), nnodes-1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(tree), nnodes-1, + "Unexpected node iteration count"); +} + +static node_t * +remove_iterate_cb(tree_t *tree, node_t *node, void *data) { + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_next(tree, node); + + node_remove(tree, node, *nnodes); + + return ret; +} + +static node_t * +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_prev(tree, node); + + node_remove(tree, node, *nnodes); + + return ret; +} + +static void +destroy_cb(node_t *node, void *data) { + unsigned *nnodes = (unsigned *)data; + + assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); + (*nnodes)--; +} + +TEST_BEGIN(test_rb_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + tree_t tree; + node_t nodes[NNODES]; + unsigned i, j, k, black_height, imbalances; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = j; + } + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = NNODES - j - 1; + } + break; + default: + for (j = 0; j < NNODES; j++) { + bag[j] = gen_rand64_range(sfmt, NNODES); + } + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize tree and nodes. */ + tree_new(&tree); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + tree_insert(&tree, &nodes[k]); + + rbtn_black_height(node_t, link, &tree, + black_height); + imbalances = tree_recurse(tree.rbt_root, + black_height, 0); + assert_u_eq(imbalances, 0, + "Tree is unbalanced"); + + assert_u_eq(tree_iterate(&tree), k+1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(&tree), k+1, + "Unexpected node iteration count"); + + assert_false(tree_empty(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_first(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_last(&tree), + "Tree should not be empty"); + + tree_next(&tree, &nodes[k]); + tree_prev(&tree, &nodes[k]); + } + + /* Remove nodes. 
*/ + switch (i % 5) { + case 0: + for (k = 0; k < j; k++) { + node_remove(&tree, &nodes[k], j - k); + } + break; + case 1: + for (k = j; k > 0; k--) { + node_remove(&tree, &nodes[k-1], k); + } + break; + case 2: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_iter(&tree, start, + remove_iterate_cb, (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } case 3: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_reverse_iter(&tree, start, + remove_reverse_iterate_cb, + (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } case 4: { + unsigned nnodes = j; + tree_destroy(&tree, destroy_cb, &nnodes); + assert_u_eq(nnodes, 0, + "Destruction terminated early"); + break; + } default: + not_reached(); + } + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef NBAGS +#undef SEED +} +TEST_END + +int +main(void) { + return test( + test_rb_empty, + test_rb_random); +} diff --git a/deps/jemalloc/test/unit/retained.c b/deps/jemalloc/test/unit/retained.c new file mode 100644 index 0000000..7993fd3 --- /dev/null +++ b/deps/jemalloc/test/unit/retained.c @@ -0,0 +1,184 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +static unsigned arena_ind; +static size_t sz; +static size_t esz; +#define NEPOCHS 8 +#define PER_THD_NALLOCS 1 +static atomic_u_t epoch; +static atomic_u_t nfinished; + +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static void +do_refresh(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); +} + +static size_t +do_get_size_impl(const char *cmd, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t z = sizeof(size_t); + + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = arena_ind; + size_t size; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); + + return size; +} + +static size_t +do_get_active(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE; +} + +static size_t +do_get_mapped(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.mapped", arena_ind); +} + +static void * +thd_start(void *arg) { + for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { + /* Busy-wait for next epoch. */ + unsigned cur_epoch; + spin_t spinner = SPIN_INITIALIZER; + while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != + next_epoch) { + spin_adaptive(&spinner); + } + assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); + + /* + * Allocate. 
The main thread will reset the arena, so there's + * no need to deallocate. + */ + for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { + void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE + ); + assert_ptr_not_null(p, + "Unexpected mallocx() failure\n"); + } + + /* Let the main thread know we've finished this iteration. */ + atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE); + } + + return NULL; +} + +TEST_BEGIN(test_retained) { + test_skip_if(!config_stats); + + arena_ind = do_arena_create(NULL); + sz = nallocx(HUGEPAGE, 0); + esz = sz + sz_large_pad; + + atomic_store_u(&epoch, 0, ATOMIC_RELAXED); + + unsigned nthreads = ncpus * 2; + if (LG_SIZEOF_PTR < 3 && nthreads > 16) { + nthreads = 16; /* 32-bit platform could run out of vaddr. */ + } + VARIABLE_ARRAY(thd_t, threads, nthreads); + for (unsigned i = 0; i < nthreads; i++) { + thd_create(&threads[i], thd_start, NULL); + } + + for (unsigned e = 1; e < NEPOCHS; e++) { + atomic_store_u(&nfinished, 0, ATOMIC_RELEASE); + atomic_store_u(&epoch, e, ATOMIC_RELEASE); + + /* Wait for threads to finish allocating. */ + spin_t spinner = SPIN_INITIALIZER; + while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { + spin_adaptive(&spinner); + } + + /* + * Assert that retained is no more than the sum of size classes + * that should have been used to satisfy the worker threads' + * requests, discounting per growth fragmentation. + */ + do_refresh(); + + size_t allocated = esz * nthreads * PER_THD_NALLOCS; + size_t active = do_get_active(arena_ind); + assert_zu_le(allocated, active, "Unexpected active memory"); + size_t mapped = do_get_mapped(arena_ind); + assert_zu_le(active, mapped, "Unexpected mapped memory"); + + arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); + size_t usable = 0; + size_t fragmented = 0; + for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < + arena->extent_grow_next; pind++) { + size_t psz = sz_pind2sz(pind); + size_t psz_fragmented = psz % esz; + size_t psz_usable = psz - psz_fragmented; + /* + * Only consider size classes that wouldn't be skipped. + */ + if (psz_usable > 0) { + assert_zu_lt(usable, allocated, + "Excessive retained memory " + "(%#zx[+%#zx] > %#zx)", usable, psz_usable, + allocated); + fragmented += psz_fragmented; + usable += psz_usable; + } + } + + /* + * Clean up arena. Destroying and recreating the arena + * is simpler that specifying extent hooks that deallocate + * (rather than retaining) during reset. + */ + do_arena_destroy(arena_ind); + assert_u_eq(do_arena_create(NULL), arena_ind, + "Unexpected arena index"); + } + + for (unsigned i = 0; i < nthreads; i++) { + thd_join(threads[i], NULL); + } + + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_retained); +} diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c new file mode 100644 index 0000000..90adca1 --- /dev/null +++ b/deps/jemalloc/test/unit/rtree.c @@ -0,0 +1,228 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/rtree.h" + +rtree_node_alloc_t *rtree_node_alloc_orig; +rtree_node_dalloc_t *rtree_node_dalloc_orig; +rtree_leaf_alloc_t *rtree_leaf_alloc_orig; +rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig; + +/* Potentially too large to safely place on the stack. 
*/ +rtree_t test_rtree; + +static rtree_node_elm_t * +rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_node_elm_t *node; + + if (rtree != &test_rtree) { + return rtree_node_alloc_orig(tsdn, rtree, nelms); + } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)); + assert_ptr_not_null(node, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); + + return node; +} + +static void +rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_node_elm_t *node) { + if (rtree != &test_rtree) { + rtree_node_dalloc_orig(tsdn, rtree, node); + return; + } + + free(node); +} + +static rtree_leaf_elm_t * +rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_leaf_elm_t *leaf; + + if (rtree != &test_rtree) { + return rtree_leaf_alloc_orig(tsdn, rtree, nelms); + } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t)); + assert_ptr_not_null(leaf, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); + + return leaf; +} + +static void +rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *leaf) { + if (rtree != &test_rtree) { + rtree_leaf_dalloc_orig(tsdn, rtree, leaf); + return; + } + + free(leaf); +} + +TEST_BEGIN(test_rtree_read_empty) { + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, + false), "rtree_extent_read() should return NULL for empty tree"); + rtree_delete(tsdn, rtree); +} +TEST_END + +#undef NTHREADS +#undef NITERS +#undef SEED + +TEST_BEGIN(test_rtree_extrema) { + extent_t extent_a, extent_b; + extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false, + sz_size2index(SC_LARGE_MINCLASS), 0, + extent_state_active, false, false, true, EXTENT_NOT_HEAD); + extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0, + extent_state_active, false, false, true, EXTENT_NOT_HEAD); + + tsdn_t *tsdn = tsdn_fetch(); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)), + "Unexpected rtree_write() failure"); + rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true), + &extent_a, + "rtree_extent_read() should return previously set value"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), + &extent_b, extent_szind_get_maybe_invalid(&extent_b), + extent_slab_get(&extent_b)), "Unexpected rtree_write() failure"); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + ~((uintptr_t)0), true), &extent_b, + "rtree_extent_read() should return previously set value"); + + rtree_delete(tsdn, rtree); +} +TEST_END + +TEST_BEGIN(test_rtree_bits) { + tsdn_t *tsdn = tsdn_fetch(); + + uintptr_t keys[] = {PAGE, PAGE + 1, + PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, + extent_state_active, false, false, true, EXTENT_NOT_HEAD); + + rtree_t *rtree = &test_rtree; + 
rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], + &extent, SC_NSIZES, false), + "Unexpected rtree_write() failure"); + for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[j], true), &extent, + "rtree_extent_read() should return previously set " + "value and ignore insignificant key bits; i=%u, " + "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, + j, keys[i], keys[j]); + } + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + (((uintptr_t)2) << LG_PAGE), false), + "Only leftmost rtree leaf should be set; i=%u", i); + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + } + + rtree_delete(tsdn, rtree); +} +TEST_END + +TEST_BEGIN(test_rtree_random) { +#define NSET 16 +#define SEED 42 + sfmt_t *sfmt = init_gen_rand(SEED); + tsdn_t *tsdn = tsdn_fetch(); + uintptr_t keys[NSET]; + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0, + extent_state_active, false, false, true, EXTENT_NOT_HEAD); + + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < NSET; i++) { + keys[i] = (uintptr_t)gen_rand64(sfmt); + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, + &rtree_ctx, keys[i], false, true); + assert_ptr_not_null(elm, + "Unexpected rtree_leaf_elm_lookup() failure"); + rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES, + false); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value, " + "i=%u", i); + } + + for (unsigned i = 0; i < NSET; i++) { + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); + } + + rtree_delete(tsdn, rtree); + fini_gen_rand(sfmt); +#undef NSET +#undef SEED +} +TEST_END + +int +main(void) { + rtree_node_alloc_orig = rtree_node_alloc; + rtree_node_alloc = rtree_node_alloc_intercept; + rtree_node_dalloc_orig = rtree_node_dalloc; + rtree_node_dalloc = rtree_node_dalloc_intercept; + rtree_leaf_alloc_orig = rtree_leaf_alloc; + rtree_leaf_alloc = rtree_leaf_alloc_intercept; + rtree_leaf_dalloc_orig = rtree_leaf_dalloc; + rtree_leaf_dalloc = rtree_leaf_dalloc_intercept; + + return test( + test_rtree_read_empty, + test_rtree_extrema, + test_rtree_bits, + test_rtree_random); +} diff --git a/deps/jemalloc/test/unit/safety_check.c b/deps/jemalloc/test/unit/safety_check.c new file mode 100644 index 0000000..bf4bd86 --- /dev/null +++ b/deps/jemalloc/test/unit/safety_check.c @@ -0,0 +1,156 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/safety_check.h" + +/* + * Note that we get called through safety_check.sh, which turns on sampling for + * everything. 
+ */ + +bool fake_abort_called; +void fake_abort(const char *message) { + (void)message; + fake_abort_called = true; +} + +TEST_BEGIN(test_malloc_free_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + free(ptr); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_mallocx_dallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = mallocx(128, 0); + ptr[128] = 0; + dallocx(ptr, 0); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_malloc_sdallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + sdallocx(ptr, 128, 0); + safety_check_set_abort(NULL); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_realloc_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + ptr = realloc(ptr, 129); + safety_check_set_abort(NULL); + free(ptr); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_rallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! */ + char* ptr = malloc(128); + ptr[128] = 0; + ptr = rallocx(ptr, 129, 0); + safety_check_set_abort(NULL); + free(ptr); + + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; +} +TEST_END + +TEST_BEGIN(test_xallocx_overflow) { + test_skip_if(!config_prof); + test_skip_if(!config_opt_safety_checks); + + safety_check_set_abort(&fake_abort); + /* Buffer overflow! 
*/ + char* ptr = malloc(128); + ptr[128] = 0; + size_t result = xallocx(ptr, 129, 0, 0); + assert_zu_eq(result, 128, ""); + free(ptr); + assert_b_eq(fake_abort_called, true, "Redzone check didn't fire."); + fake_abort_called = false; + safety_check_set_abort(NULL); +} +TEST_END + +TEST_BEGIN(test_realloc_no_overflow) { + char* ptr = malloc(128); + ptr = realloc(ptr, 256); + ptr[128] = 0; + ptr[255] = 0; + free(ptr); + + ptr = malloc(128); + ptr = realloc(ptr, 64); + ptr[63] = 0; + ptr[0] = 0; + free(ptr); +} +TEST_END + +TEST_BEGIN(test_rallocx_no_overflow) { + char* ptr = malloc(128); + ptr = rallocx(ptr, 256, 0); + ptr[128] = 0; + ptr[255] = 0; + free(ptr); + + ptr = malloc(128); + ptr = rallocx(ptr, 64, 0); + ptr[63] = 0; + ptr[0] = 0; + free(ptr); +} +TEST_END + +int +main(void) { + return test( + test_malloc_free_overflow, + test_mallocx_dallocx_overflow, + test_malloc_sdallocx_overflow, + test_realloc_overflow, + test_rallocx_overflow, + test_xallocx_overflow, + test_realloc_no_overflow, + test_rallocx_no_overflow); +} diff --git a/deps/jemalloc/test/unit/safety_check.sh b/deps/jemalloc/test/unit/safety_check.sh new file mode 100644 index 0000000..8fcc7d8 --- /dev/null +++ b/deps/jemalloc/test/unit/safety_check.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,lg_prof_sample:0" +fi diff --git a/deps/jemalloc/test/unit/sc.c b/deps/jemalloc/test/unit/sc.c new file mode 100644 index 0000000..bf51d8e --- /dev/null +++ b/deps/jemalloc/test/unit/sc.c @@ -0,0 +1,33 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_update_slab_size) { + sc_data_t data; + memset(&data, 0, sizeof(data)); + sc_data_init(&data); + sc_t *tiny = &data.sc[0]; + size_t tiny_size = (ZU(1) << tiny->lg_base) + + (ZU(tiny->ndelta) << tiny->lg_delta); + size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1; + sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big); + assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages"); + + sc_data_update_slab_size(&data, 1, 10 * PAGE, 1); + for (int i = 0; i < data.nbins; i++) { + sc_t *sc = &data.sc[i]; + size_t reg_size = (ZU(1) << sc->lg_base) + + (ZU(sc->ndelta) << sc->lg_delta); + if (reg_size <= PAGE) { + assert_d_eq(sc->pgs, 1, "Ignored valid page size hint"); + } else { + assert_d_gt(sc->pgs, 1, + "Allowed invalid page size hint"); + } + } +} +TEST_END + +int +main(void) { + return test( + test_update_slab_size); +} diff --git a/deps/jemalloc/test/unit/seq.c b/deps/jemalloc/test/unit/seq.c new file mode 100644 index 0000000..19613b0 --- /dev/null +++ b/deps/jemalloc/test/unit/seq.c @@ -0,0 +1,95 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/seq.h" + +typedef struct data_s data_t; +struct data_s { + int arr[10]; +}; + +static void +set_data(data_t *data, int num) { + for (int i = 0; i < 10; i++) { + data->arr[i] = num; + } +} + +static void +assert_data(data_t *data) { + int num = data->arr[0]; + for (int i = 0; i < 10; i++) { + assert_d_eq(num, data->arr[i], "Data consistency error"); + } +} + +seq_define(data_t, data) + +typedef struct thd_data_s thd_data_t; +struct thd_data_s { + seq_data_t data; +}; + +static void * +seq_reader_thd(void *arg) { + thd_data_t *thd_data = (thd_data_t *)arg; + int iter = 0; + data_t local_data; + while (iter < 1000 * 1000 - 1) { + bool success = seq_try_load_data(&local_data, &thd_data->data); + if (success) { + assert_data(&local_data); + assert_d_le(iter, local_data.arr[0], + "Seq read went back in time."); + iter 
= local_data.arr[0]; + } + } + return NULL; +} + +static void * +seq_writer_thd(void *arg) { + thd_data_t *thd_data = (thd_data_t *)arg; + data_t local_data; + memset(&local_data, 0, sizeof(local_data)); + for (int i = 0; i < 1000 * 1000; i++) { + set_data(&local_data, i); + seq_store_data(&thd_data->data, &local_data); + } + return NULL; +} + +TEST_BEGIN(test_seq_threaded) { + thd_data_t thd_data; + memset(&thd_data, 0, sizeof(thd_data)); + + thd_t reader; + thd_t writer; + + thd_create(&reader, seq_reader_thd, &thd_data); + thd_create(&writer, seq_writer_thd, &thd_data); + + thd_join(reader, NULL); + thd_join(writer, NULL); +} +TEST_END + +TEST_BEGIN(test_seq_simple) { + data_t data; + seq_data_t seq; + memset(&seq, 0, sizeof(seq)); + for (int i = 0; i < 1000 * 1000; i++) { + set_data(&data, i); + seq_store_data(&seq, &data); + set_data(&data, 0); + bool success = seq_try_load_data(&data, &seq); + assert_b_eq(success, true, "Failed non-racing read"); + assert_data(&data); + } +} +TEST_END + +int main(void) { + return test_no_reentrancy( + test_seq_simple, + test_seq_threaded); +} diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c new file mode 100644 index 0000000..6947336 --- /dev/null +++ b/deps/jemalloc/test/unit/size_classes.c @@ -0,0 +1,188 @@ +#include "test/jemalloc_test.h" + +static size_t +get_max_size_class(void) { + unsigned nlextents; + size_t mib[4]; + size_t sz, miblen, max_size_class; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + mib[2] = nlextents - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); + + return max_size_class; +} + +TEST_BEGIN(test_size_classes) { + size_t size_class, max_size_class; + szind_t index, max_index; + + max_size_class = get_max_size_class(); + max_index = sz_size2index(max_size_class); + + for (index = 0, size_class = sz_index2size(index); index < max_index || + size_class < max_size_class; index++, size_class = + sz_index2size(index)) { + assert_true(index < max_index, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + + assert_u_eq(index, sz_size2index(size_class), + "sz_size2index() does not reverse sz_index2size(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + assert_zu_eq(size_class, + sz_index2size(sz_size2index(size_class)), + "sz_index2size() does not reverse sz_size2index(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + + assert_u_eq(index+1, sz_size2index(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (index > 0) ? 
+ sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); + assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), + "sz_s2u() does not round up to next size class"); + } + + assert_u_eq(index, sz_size2index(sz_index2size(index)), + "sz_size2index() does not reverse sz_index2size()"); + assert_zu_eq(max_size_class, sz_index2size( + sz_size2index(max_size_class)), + "sz_index2size() does not reverse sz_size2index()"); + + assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_psize_classes) { + size_t size_class, max_psz; + pszind_t pind, max_pind; + + max_psz = get_max_size_class() + PAGE; + max_pind = sz_psz2ind(max_psz); + + for (pind = 0, size_class = sz_pind2sz(pind); + pind < max_pind || size_class < max_psz; + pind++, size_class = sz_pind2sz(pind)) { + assert_true(pind < max_pind, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + assert_true(size_class < max_psz, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + + assert_u_eq(pind, sz_psz2ind(size_class), + "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); + assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), + "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); + + if (size_class == SC_LARGE_MAXCLASS) { + assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1), + "Next size_class does not round up properly"); + } else { + assert_u_eq(pind + 1, sz_psz2ind(size_class + 1), + "Next size_class does not round up properly"); + } + + assert_zu_eq(size_class, (pind > 0) ? 
+ sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); + assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1), + "sz_psz2u() does not round up to next size class"); + } + + assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)), + "sz_psz2ind() does not reverse sz_pind2sz()"); + assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)), + "sz_pind2sz() does not reverse sz_psz2ind()"); + + assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_overflow) { + size_t max_size_class, max_psz; + + max_size_class = get_max_size_class(); + max_psz = max_size_class + PAGE; + + assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES, + "sz_size2index() should return NSIZES on overflow"); + + assert_zu_eq(sz_s2u(max_size_class+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(SIZE_T_MAX), 0, + "sz_s2u() should return 0 on overflow"); + + assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + + assert_zu_eq(sz_psz2u(max_size_class+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported" + " size"); + assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported " + "size"); + assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow"); +} +TEST_END + +int +main(void) { + return test( + test_size_classes, + test_psize_classes, + test_overflow); +} diff --git a/deps/jemalloc/test/unit/slab.c b/deps/jemalloc/test/unit/slab.c new file mode 100644 index 0000000..c56af25 --- /dev/null +++ b/deps/jemalloc/test/unit/slab.c @@ -0,0 +1,33 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_arena_slab_regind) { + szind_t binind; + + for (binind = 0; binind < SC_NBINS; binind++) { + size_t regind; + extent_t slab; + const bin_info_t *bin_info = &bin_infos[binind]; + extent_init(&slab, NULL, mallocx(bin_info->slab_size, + MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, + binind, 0, extent_state_active, false, true, true, + EXTENT_NOT_HEAD); + assert_ptr_not_null(extent_addr_get(&slab), + "Unexpected malloc() failure"); + for (regind = 0; regind < bin_info->nregs; regind++) { + void *reg = (void *)((uintptr_t)extent_addr_get(&slab) + + (bin_info->reg_size * regind)); + assert_zu_eq(arena_slab_regind(&slab, binind, reg), + regind, + "Incorrect region index computed for size %zu", + bin_info->reg_size); + } + free(extent_addr_get(&slab)); + } +} +TEST_END + +int 
+main(void) { + return test( + test_arena_slab_regind); +} diff --git a/deps/jemalloc/test/unit/smoothstep.c b/deps/jemalloc/test/unit/smoothstep.c new file mode 100644 index 0000000..7c5dbb7 --- /dev/null +++ b/deps/jemalloc/test/unit/smoothstep.c @@ -0,0 +1,102 @@ +#include "test/jemalloc_test.h" + +static const uint64_t smoothstep_tab[] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +TEST_BEGIN(test_smoothstep_integral) { + uint64_t sum, min, max; + unsigned i; + + /* + * The integral of smoothstep in the [0..1] range equals 1/2. Verify + * that the fixed point representation's integral is no more than + * rounding error distant from 1/2. Regarding rounding, each table + * element is rounded down to the nearest fixed point value, so the + * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += smoothstep_tab[i]; + } + + max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); + min = max - SMOOTHSTEP_NSTEPS; + + assert_u64_ge(sum, min, + "Integral too small, even accounting for truncation"); + assert_u64_le(sum, max, "Integral exceeds 1/2"); + if (false) { + malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", + max - sum, SMOOTHSTEP_NSTEPS); + } +} +TEST_END + +TEST_BEGIN(test_smoothstep_monotonic) { + uint64_t prev_h; + unsigned i; + + /* + * The smoothstep function is monotonic in [0..1], i.e. its slope is + * non-negative. In practice we want to parametrize table generation + * such that piecewise slope is greater than zero, but do not require + * that here. + */ + prev_h = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + uint64_t h = smoothstep_tab[i]; + assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); + prev_h = h; + } + assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], + (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); +} +TEST_END + +TEST_BEGIN(test_smoothstep_slope) { + uint64_t prev_h, prev_delta; + unsigned i; + + /* + * The smoothstep slope strictly increases until x=0.5, and then + * strictly decreases until x=1.0. Verify the slightly weaker + * requirement of monotonicity, so that inadequate table precision does + * not cause false test failures. 
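+ * Concretely, the first loop checks that successive table deltas
+ * (smoothstep_tab[i] - smoothstep_tab[i-1]) never decrease up to the
+ * midpoint; the second loop walks down from x = 1.0 and checks that the
+ * deltas never decrease in that direction, i.e. the slope does not
+ * increase over the second half.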
+ */ + prev_h = 0; + prev_delta = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = h - prev_h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically increase in 0.0 <= x <= 0.5, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } + + prev_h = KQU(1) << SMOOTHSTEP_BFP; + prev_delta = 0; + for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = prev_h - h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically decrease in 0.5 <= x <= 1.0, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } +} +TEST_END + +int +main(void) { + return test( + test_smoothstep_integral, + test_smoothstep_monotonic, + test_smoothstep_slope); +} diff --git a/deps/jemalloc/test/unit/spin.c b/deps/jemalloc/test/unit/spin.c new file mode 100644 index 0000000..b965f74 --- /dev/null +++ b/deps/jemalloc/test/unit/spin.c @@ -0,0 +1,18 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +TEST_BEGIN(test_spin) { + spin_t spinner = SPIN_INITIALIZER; + + for (unsigned i = 0; i < 100; i++) { + spin_adaptive(&spinner); + } +} +TEST_END + +int +main(void) { + return test( + test_spin); +} diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c new file mode 100644 index 0000000..646768e --- /dev/null +++ b/deps/jemalloc/test/unit/stats.c @@ -0,0 +1,374 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_stats_summary) { + size_t sz, allocated, active, resident, mapped; + int expected = config_stats ? 0 : ENOENT; + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_le(allocated, active, + "allocated should be no larger than active"); + assert_zu_lt(active, resident, + "active should be less than resident"); + assert_zu_lt(active, mapped, + "active should be less than mapped"); + } +} +TEST_END + +TEST_BEGIN(test_stats_large) { + void *p; + uint64_t epoch; + size_t allocated; + uint64_t nmalloc, ndalloc, nrequests; + size_t sz; + int expected = config_stats ? 
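/* the stats mallctls below return ENOENT when stats support is compiled out */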
0 : ENOENT; + + p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_le(nmalloc, nrequests, + "nmalloc should no larger than nrequests"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_summary) { + void *little, *large; + uint64_t epoch; + size_t sz; + int expected = config_stats ? 0 : ENOENT; + size_t mapped; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; + + little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); + assert_ptr_not_null(little, "Unexpected mallocx() failure"); + large = mallocx((1U << SC_LG_LARGE_MINCLASS), + MALLOCX_ARENA(0)); + assert_ptr_not_null(large, "Unexpected mallocx() failure"); + + dallocx(little, 0); + dallocx(large, 0); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 
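/* flushing fails with EFAULT when the tcache is disabled */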
0 : EFAULT, "Unexpected mallctl() result"); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, + 0), expected, "Unexepected mallctl() result"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.dirty_npurge", + (void *)&dirty_npurge, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", + (void *)&dirty_nmadvise, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_purged", + (void *)&dirty_purged, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge", + (void *)&muzzy_npurge, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", + (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_purged", + (void *)&muzzy_purged, &sz, NULL, 0), expected, + "Unexepected mallctl() result"); + + if (config_stats) { + if (!background_thread_enabled()) { + assert_u64_gt(dirty_npurge + muzzy_npurge, 0, + "At least one purge should have occurred"); + } + assert_u64_le(dirty_nmadvise, dirty_purged, + "dirty_nmadvise should be no greater than dirty_purged"); + assert_u64_le(muzzy_nmadvise, muzzy_purged, + "muzzy_nmadvise should be no greater than muzzy_purged"); + } +} +TEST_END + +void * +thd_start(void *arg) { + return NULL; +} + +static void +no_lazy_lock(void) { + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} + +TEST_BEGIN(test_stats_arenas_small) { + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc, nrequests; + int expected = config_stats ? 0 : ENOENT; + + no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ + + p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.small.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be no greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_large) { + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc; + int expected = config_stats ? 
0 : ENOENT; + + p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + } + + dallocx(p, 0); +} +TEST_END + +static void +gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { + sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name); +} + +TEST_BEGIN(test_stats_arenas_bins) { + void *p; + size_t sz, curslabs, curregs, nonfull_slabs; + uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nslabs, nreslabs; + int expected = config_stats ? 0 : ENOENT; + + /* Make sure allocation below isn't satisfied by tcache. */ + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); + + unsigned arena_ind, old_arena_ind; + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); + sz = sizeof(arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + + p = malloc(bin_infos[0].reg_size); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 
0 : EFAULT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + char cmd[128]; + sz = sizeof(uint64_t); + gen_mallctl_str(cmd, "nmalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "ndalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nrequests", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + gen_mallctl_str(cmd, "curregs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + sz = sizeof(uint64_t); + gen_mallctl_str(cmd, "nfills", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nflushes", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + gen_mallctl_str(cmd, "nslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nreslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + gen_mallctl_str(cmd, "curslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nonfull_slabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + assert_zu_gt(curregs, 0, + "allocated should be greater than zero"); + if (opt_tcache) { + assert_u64_gt(nfills, 0, + "At least one fill should have occurred"); + assert_u64_gt(nflushes, 0, + "At least one flush should have occurred"); + } + assert_u64_gt(nslabs, 0, + "At least one slab should have been allocated"); + assert_zu_gt(curslabs, 0, + "At least one slab should be currently allocated"); + assert_zu_eq(nonfull_slabs, 0, + "slabs_nonfull should be empty"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_lextents) { + void *p; + uint64_t epoch, nmalloc, ndalloc; + size_t curlextents, sz, hsize; + int expected = config_stats ? 
0 : ENOENT; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + + p = mallocx(hsize, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", + (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", + (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", + (void *)&curlextents, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(curlextents, 0, + "At least one extent should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_stats_summary, + test_stats_large, + test_stats_arenas_summary, + test_stats_arenas_small, + test_stats_arenas_large, + test_stats_arenas_bins, + test_stats_arenas_lextents); +} diff --git a/deps/jemalloc/test/unit/stats_print.c b/deps/jemalloc/test/unit/stats_print.c new file mode 100644 index 0000000..014d002 --- /dev/null +++ b/deps/jemalloc/test/unit/stats_print.c @@ -0,0 +1,999 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +typedef enum { + TOKEN_TYPE_NONE, + TOKEN_TYPE_ERROR, + TOKEN_TYPE_EOI, + TOKEN_TYPE_NULL, + TOKEN_TYPE_FALSE, + TOKEN_TYPE_TRUE, + TOKEN_TYPE_LBRACKET, + TOKEN_TYPE_RBRACKET, + TOKEN_TYPE_LBRACE, + TOKEN_TYPE_RBRACE, + TOKEN_TYPE_COLON, + TOKEN_TYPE_COMMA, + TOKEN_TYPE_STRING, + TOKEN_TYPE_NUMBER +} token_type_t; + +typedef struct parser_s parser_t; +typedef struct { + parser_t *parser; + token_type_t token_type; + size_t pos; + size_t len; + size_t line; + size_t col; +} token_t; + +struct parser_s { + bool verbose; + char *buf; /* '\0'-terminated. */ + size_t len; /* Number of characters preceding '\0' in buf. 
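(parser_append() grows buf one chunk at a time; parser_tokenize() then scans it in place.)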
*/ + size_t pos; + size_t line; + size_t col; + token_t token; +}; + +static void +token_init(token_t *token, parser_t *parser, token_type_t token_type, + size_t pos, size_t len, size_t line, size_t col) { + token->parser = parser; + token->token_type = token_type; + token->pos = pos; + token->len = len; + token->line = line; + token->col = col; +} + +static void +token_error(token_t *token) { + if (!token->parser->verbose) { + return; + } + switch (token->token_type) { + case TOKEN_TYPE_NONE: + not_reached(); + case TOKEN_TYPE_ERROR: + malloc_printf("%zu:%zu: Unexpected character in token: ", + token->line, token->col); + break; + default: + malloc_printf("%zu:%zu: Unexpected token: ", token->line, + token->col); + break; + } + UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO, + &token->parser->buf[token->pos], token->len); + malloc_printf("\n"); +} + +static void +parser_init(parser_t *parser, bool verbose) { + parser->verbose = verbose; + parser->buf = NULL; + parser->len = 0; + parser->pos = 0; + parser->line = 1; + parser->col = 0; +} + +static void +parser_fini(parser_t *parser) { + if (parser->buf != NULL) { + dallocx(parser->buf, MALLOCX_TCACHE_NONE); + } +} + +static bool +parser_append(parser_t *parser, const char *str) { + size_t len = strlen(str); + char *buf = (parser->buf == NULL) ? mallocx(len + 1, + MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1, + MALLOCX_TCACHE_NONE); + if (buf == NULL) { + return true; + } + memcpy(&buf[parser->len], str, len + 1); + parser->buf = buf; + parser->len += len; + return false; +} + +static bool +parser_tokenize(parser_t *parser) { + enum { + STATE_START, + STATE_EOI, + STATE_N, STATE_NU, STATE_NUL, STATE_NULL, + STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE, + STATE_T, STATE_TR, STATE_TRU, STATE_TRUE, + STATE_LBRACKET, + STATE_RBRACKET, + STATE_LBRACE, + STATE_RBRACE, + STATE_COLON, + STATE_COMMA, + STATE_CHARS, + STATE_CHAR_ESCAPE, + STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD, + STATE_STRING, + STATE_MINUS, + STATE_LEADING_ZERO, + STATE_DIGITS, + STATE_DECIMAL, + STATE_FRAC_DIGITS, + STATE_EXP, + STATE_EXP_SIGN, + STATE_EXP_DIGITS, + STATE_ACCEPT + } state = STATE_START; + size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); + size_t token_line JEMALLOC_CC_SILENCE_INIT(1); + size_t token_col JEMALLOC_CC_SILENCE_INIT(0); + + assert_zu_le(parser->pos, parser->len, + "Position is past end of buffer"); + + while (state != STATE_ACCEPT) { + char c = parser->buf[parser->pos]; + + switch (state) { + case STATE_START: + token_pos = parser->pos; + token_line = parser->line; + token_col = parser->col; + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + break; + case '\0': + state = STATE_EOI; + break; + case 'n': + state = STATE_N; + break; + case 'f': + state = STATE_F; + break; + case 't': + state = STATE_T; + break; + case '[': + state = STATE_LBRACKET; + break; + case ']': + state = STATE_RBRACKET; + break; + case '{': + state = STATE_LBRACE; + break; + case '}': + state = STATE_RBRACE; + break; + case ':': + state = STATE_COLON; + break; + case ',': + state = STATE_COMMA; + break; + case '"': + state = STATE_CHARS; + break; + case '-': + state = STATE_MINUS; + break; + case '0': + state = STATE_LEADING_ZERO; + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return 
true; + } + break; + case STATE_EOI: + token_init(&parser->token, parser, + TOKEN_TYPE_EOI, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + case STATE_N: + switch (c) { + case 'u': + state = STATE_NU; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NU: + switch (c) { + case 'l': + state = STATE_NUL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NUL: + switch (c) { + case 'l': + state = STATE_NULL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NULL: + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, TOKEN_TYPE_NULL, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_F: + switch (c) { + case 'a': + state = STATE_FA; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FA: + switch (c) { + case 'l': + state = STATE_FAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FAL: + switch (c) { + case 's': + state = STATE_FALS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FALS: + switch (c) { + case 'e': + state = STATE_FALSE; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FALSE: + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, + TOKEN_TYPE_FALSE, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + case STATE_T: + switch (c) { + case 'r': + state = STATE_TR; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TR: + switch (c) { + case 'u': + state = STATE_TRU; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TRU: + switch (c) { + case 'e': + state = STATE_TRUE; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TRUE: + switch (c) { + case 
' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, TOKEN_TYPE_TRUE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_LBRACKET: + token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_RBRACKET: + token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_LBRACE: + token_init(&parser->token, parser, TOKEN_TYPE_LBRACE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_RBRACE: + token_init(&parser->token, parser, TOKEN_TYPE_RBRACE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_COLON: + token_init(&parser->token, parser, TOKEN_TYPE_COLON, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_COMMA: + token_init(&parser->token, parser, TOKEN_TYPE_COMMA, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_CHARS: + switch (c) { + case '\\': + state = STATE_CHAR_ESCAPE; + break; + case '"': + state = STATE_STRING; + break; + case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: + case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: + case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: + case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13: + case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: + case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0x1e: case 0x1f: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + default: + break; + } + break; + case STATE_CHAR_ESCAPE: + switch (c) { + case '"': case '\\': case '/': case 'b': case 'n': + case 'r': case 't': + state = STATE_CHARS; + break; + case 'u': + state = STATE_CHAR_U; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_U: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UDD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UDD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': 
case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UDDD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UDDD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHARS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_STRING: + token_init(&parser->token, parser, TOKEN_TYPE_STRING, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_MINUS: + switch (c) { + case '0': + state = STATE_LEADING_ZERO; + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_LEADING_ZERO: + switch (c) { + case '.': + state = STATE_DECIMAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + break; + case '.': + state = STATE_DECIMAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_DECIMAL: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_FRAC_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FRAC_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + break; + case 'e': case 'E': + state = STATE_EXP; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_EXP: + switch (c) { + case '-': case '+': + state = STATE_EXP_SIGN; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_EXP_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_EXP_SIGN: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_EXP_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_EXP_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case 
'4': + case '5': case '6': case '7': case '8': case '9': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + default: + not_reached(); + } + + if (state != STATE_ACCEPT) { + if (c == '\n') { + parser->line++; + parser->col = 0; + } else { + parser->col++; + } + parser->pos++; + } + } + return false; +} + +static bool parser_parse_array(parser_t *parser); +static bool parser_parse_object(parser_t *parser); + +static bool +parser_parse_value(parser_t *parser) { + switch (parser->token.token_type) { + case TOKEN_TYPE_NULL: + case TOKEN_TYPE_FALSE: + case TOKEN_TYPE_TRUE: + case TOKEN_TYPE_STRING: + case TOKEN_TYPE_NUMBER: + return false; + case TOKEN_TYPE_LBRACE: + return parser_parse_object(parser); + case TOKEN_TYPE_LBRACKET: + return parser_parse_array(parser); + default: + return true; + } + not_reached(); +} + +static bool +parser_parse_pair(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, + "Pair should start with string"); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COLON: + if (parser_tokenize(parser)) { + return true; + } + return parser_parse_value(parser); + default: + return true; + } +} + +static bool +parser_parse_values(parser_t *parser) { + if (parser_parse_value(parser)) { + return true; + } + + while (true) { + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COMMA: + if (parser_tokenize(parser)) { + return true; + } + if (parser_parse_value(parser)) { + return true; + } + break; + case TOKEN_TYPE_RBRACKET: + return false; + default: + return true; + } + } +} + +static bool +parser_parse_array(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET, + "Array should start with ["); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_RBRACKET: + return false; + default: + return parser_parse_values(parser); + } + not_reached(); +} + +static bool +parser_parse_pairs(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, + "Object should start with string"); + if (parser_parse_pair(parser)) { + return true; + } + + while (true) { + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COMMA: + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_STRING: + if (parser_parse_pair(parser)) { + return true; + } + break; + default: + return true; + } + break; + case TOKEN_TYPE_RBRACE: + return false; + default: + return true; + } + } +} + +static bool +parser_parse_object(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE, + "Object should start with {"); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_STRING: + return parser_parse_pairs(parser); + case TOKEN_TYPE_RBRACE: + return false; + default: + return true; + } + not_reached(); +} + +static bool +parser_parse(parser_t *parser) { + if (parser_tokenize(parser)) { + goto label_error; + } + if (parser_parse_value(parser)) { + goto label_error; + } + + if (parser_tokenize(parser)) { + goto label_error; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_EOI: + return false; + default: + goto label_error; + } + not_reached(); + +label_error: + 
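/* Tokenize/parse functions return true on error: report the offending token (when verbose) and propagate. */
+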
token_error(&parser->token); + return true; +} + +TEST_BEGIN(test_json_parser) { + size_t i; + const char *invalid_inputs[] = { + /* Tokenizer error case tests. */ + "{ \"string\": X }", + "{ \"string\": nXll }", + "{ \"string\": nuXl }", + "{ \"string\": nulX }", + "{ \"string\": nullX }", + "{ \"string\": fXlse }", + "{ \"string\": faXse }", + "{ \"string\": falXe }", + "{ \"string\": falsX }", + "{ \"string\": falseX }", + "{ \"string\": tXue }", + "{ \"string\": trXe }", + "{ \"string\": truX }", + "{ \"string\": trueX }", + "{ \"string\": \"\n\" }", + "{ \"string\": \"\\z\" }", + "{ \"string\": \"\\uX000\" }", + "{ \"string\": \"\\u0X00\" }", + "{ \"string\": \"\\u00X0\" }", + "{ \"string\": \"\\u000X\" }", + "{ \"string\": -X }", + "{ \"string\": 0.X }", + "{ \"string\": 0.0eX }", + "{ \"string\": 0.0e+X }", + + /* Parser error test cases. */ + "{\"string\": }", + "{\"string\" }", + "{\"string\": [ 0 }", + "{\"string\": {\"a\":0, 1 } }", + "{\"string\": {\"a\":0: } }", + "{", + "{}{", + }; + const char *valid_inputs[] = { + /* Token tests. */ + "null", + "false", + "true", + "{}", + "{\"a\": 0}", + "[]", + "[0, 1]", + "0", + "1", + "10", + "-10", + "10.23", + "10.23e4", + "10.23e-4", + "10.23e+4", + "10.23E4", + "10.23E-4", + "10.23E+4", + "-10.23", + "-10.23e4", + "-10.23e-4", + "-10.23e+4", + "-10.23E4", + "-10.23E-4", + "-10.23E+4", + "\"value\"", + "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", + + /* Parser test with various nesting. */ + "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", + }; + + for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) { + const char *input = invalid_inputs[i]; + parser_t parser; + parser_init(&parser, false); + assert_false(parser_append(&parser, input), + "Unexpected input appending failure"); + assert_true(parser_parse(&parser), + "Unexpected parse success for input: %s", input); + parser_fini(&parser); + } + + for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) { + const char *input = valid_inputs[i]; + parser_t parser; + parser_init(&parser, true); + assert_false(parser_append(&parser, input), + "Unexpected input appending failure"); + assert_false(parser_parse(&parser), + "Unexpected parse error for input: %s", input); + parser_fini(&parser); + } +} +TEST_END + +void +write_cb(void *opaque, const char *str) { + parser_t *parser = (parser_t *)opaque; + if (parser_append(parser, str)) { + test_fail("Unexpected input appending failure"); + } +} + +TEST_BEGIN(test_stats_print_json) { + const char *opts[] = { + "J", + "Jg", + "Jm", + "Jd", + "Jmd", + "Jgd", + "Jgm", + "Jgmd", + "Ja", + "Jb", + "Jl", + "Jx", + "Jbl", + "Jal", + "Jab", + "Jabl", + "Jax", + "Jbx", + "Jlx", + "Jablx", + "Jgmdablx", + }; + unsigned arena_ind, i; + + for (i = 0; i < 3; i++) { + unsigned j; + + switch (i) { + case 0: + break; + case 1: { + size_t sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); + break; + } case 2: { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", + mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + break; + } default: + not_reached(); + } + + for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) { + parser_t parser; + + parser_init(&parser, true); + malloc_stats_print(write_cb, (void *)&parser, opts[j]); + 
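/* Whichever sections the opts string selects, the output must still parse as well-formed JSON. */
+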
assert_false(parser_parse(&parser), + "Unexpected parse error, opts=\"%s\"", opts[j]); + parser_fini(&parser); + } + } +} +TEST_END + +int +main(void) { + return test( + test_json_parser, + test_stats_print_json); +} diff --git a/deps/jemalloc/test/unit/test_hooks.c b/deps/jemalloc/test/unit/test_hooks.c new file mode 100644 index 0000000..ded8698 --- /dev/null +++ b/deps/jemalloc/test/unit/test_hooks.c @@ -0,0 +1,38 @@ +#include "test/jemalloc_test.h" + +static bool hook_called = false; + +static void +hook() { + hook_called = true; +} + +static int +func_to_hook(int arg1, int arg2) { + return arg1 + arg2; +} + +#define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook) + +TEST_BEGIN(unhooked_call) { + test_hooks_libc_hook = NULL; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_false(hook_called, "Nulling out hook didn't take."); +} +TEST_END + +TEST_BEGIN(hooked_call) { + test_hooks_libc_hook = &hook; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_true(hook_called, "Hook should have executed."); +} +TEST_END + +int +main(void) { + return test( + unhooked_call, + hooked_call); +} diff --git a/deps/jemalloc/test/unit/ticker.c b/deps/jemalloc/test/unit/ticker.c new file mode 100644 index 0000000..e5790a3 --- /dev/null +++ b/deps/jemalloc/test/unit/ticker.c @@ -0,0 +1,73 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ticker.h" + +TEST_BEGIN(test_ticker_tick) { +#define NREPS 2 +#define NTICKS 3 + ticker_t ticker; + int32_t i, j; + + ticker_init(&ticker, NTICKS); + for (i = 0; i < NREPS; i++) { + for (j = 0; j < NTICKS; j++) { + assert_u_eq(ticker_read(&ticker), NTICKS - j, + "Unexpected ticker value (i=%d, j=%d)", i, j); + assert_false(ticker_tick(&ticker), + "Unexpected ticker fire (i=%d, j=%d)", i, j); + } + assert_u32_eq(ticker_read(&ticker), 0, + "Expected ticker depletion"); + assert_true(ticker_tick(&ticker), + "Expected ticker fire (i=%d)", i); + assert_u32_eq(ticker_read(&ticker), NTICKS, + "Expected ticker reset"); + } +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_ticks) { +#define NTICKS 3 + ticker_t ticker; + + ticker_init(&ticker, NTICKS); + + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); + assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); + assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + + assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_copy) { +#define NTICKS 3 + ticker_t ta, tb; + + ticker_init(&ta, NTICKS); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + + ticker_tick(&ta); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +int +main(void) { + return test( + test_ticker_tick, + test_ticker_ticks, + test_ticker_copy); +} diff --git a/deps/jemalloc/test/unit/tsd.c 
b/deps/jemalloc/test/unit/tsd.c new file mode 100644 index 0000000..917884d --- /dev/null +++ b/deps/jemalloc/test/unit/tsd.c @@ -0,0 +1,267 @@ +#include "test/jemalloc_test.h" + +/* + * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't + * be asserting that we're on one. + */ +static bool originally_fast; +static int data_cleanup_count; + +void +data_cleanup(int *data) { + if (data_cleanup_count == 0) { + assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT, + "Argument passed into cleanup function should match tsd " + "value"); + } + ++data_cleanup_count; + + /* + * Allocate during cleanup for two rounds, in order to assure that + * jemalloc's internal tsd reinitialization happens. + */ + bool reincarnate = false; + switch (*data) { + case MALLOC_TSD_TEST_DATA_INIT: + *data = 1; + reincarnate = true; + break; + case 1: + *data = 2; + reincarnate = true; + break; + case 2: + return; + default: + not_reached(); + } + + if (reincarnate) { + void *p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpeced mallocx() failure"); + dallocx(p, 0); + } +} + +static void * +thd_start(void *arg) { + int d = (int)(uintptr_t)arg; + void *p; + + tsd_t *tsd = tsd_fetch(); + assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT, + "Initial tsd get should return initialization value"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + tsd_test_data_set(tsd, d); + assert_x_eq(tsd_test_data_get(tsd), d, + "After tsd set, tsd get should return value that was set"); + + d = 0; + assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg, + "Resetting local data should have no effect on tsd"); + + tsd_test_callback_set(tsd, &data_cleanup); + + free(p); + return NULL; +} + +TEST_BEGIN(test_tsd_main_thread) { + thd_start((void *)(uintptr_t)0xa5f3e329); +} +TEST_END + +TEST_BEGIN(test_tsd_sub_thread) { + thd_t thd; + + data_cleanup_count = 0; + thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT); + thd_join(thd, NULL); + /* + * We reincarnate twice in the data cleanup, so it should execute at + * least 3 times. + */ + assert_x_ge(data_cleanup_count, 3, + "Cleanup function should have executed multiple times."); +} +TEST_END + +static void * +thd_start_reincarnated(void *arg) { + tsd_t *tsd = tsd_fetch(); + assert(tsd); + + void *p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + /* Manually trigger reincarnation. 
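(tsd_cleanup() moves the tsd into the purgatory state; the free() below then revives it in the reincarnated state.)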
*/ + assert_ptr_not_null(tsd_arena_get(tsd), + "Should have tsd arena set."); + tsd_cleanup((void *)tsd); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "TSD arena should have been cleared."); + assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory, + "TSD state should be purgatory\n"); + + free(p); + assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated, + "TSD state should be reincarnated\n"); + p = mallocx(1, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "Should not have tsd arena set after reincarnation."); + + free(p); + tsd_cleanup((void *)tsd); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "TSD arena should have been cleared after 2nd cleanup."); + + return NULL; +} + +TEST_BEGIN(test_tsd_reincarnation) { + thd_t thd; + thd_create(&thd, thd_start_reincarnated, NULL); + thd_join(thd, NULL); +} +TEST_END + +typedef struct { + atomic_u32_t phase; + atomic_b_t error; +} global_slow_data_t; + +static void * +thd_start_global_slow(void *arg) { + /* PHASE 0 */ + global_slow_data_t *data = (global_slow_data_t *)arg; + free(mallocx(1, 0)); + + tsd_t *tsd = tsd_fetch(); + /* + * No global slowness has happened yet; there was an error if we were + * originally fast but aren't now. + */ + atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), + ATOMIC_SEQ_CST); + atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST); + + /* PHASE 2 */ + while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) { + } + free(mallocx(1, 0)); + atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); + atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST); + + /* PHASE 4 */ + while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) { + } + free(mallocx(1, 0)); + atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); + atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST); + + /* PHASE 6 */ + while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) { + } + free(mallocx(1, 0)); + /* Only one decrement so far. */ + atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST); + atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST); + + /* PHASE 8 */ + while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) { + } + free(mallocx(1, 0)); + /* + * Both decrements happened; we should be fast again (if we ever + * were) + */ + atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd), + ATOMIC_SEQ_CST); + atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST); + + return NULL; +} + +TEST_BEGIN(test_tsd_global_slow) { + global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)}; + /* + * Note that the "mallocx" here (vs. malloc) is important, since the + * compiler is allowed to optimize away free(malloc(1)) but not + * free(mallocx(1)). + */ + free(mallocx(1, 0)); + tsd_t *tsd = tsd_fetch(); + originally_fast = tsd_fast(tsd); + + thd_t thd; + thd_create(&thd, thd_start_global_slow, (void *)&data.phase); + /* PHASE 1 */ + while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) { + /* + * We don't have a portable condvar/semaphore mechanism. + * Spin-wait. + */ + } + assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); + tsd_global_slow_inc(tsd_tsdn(tsd)); + free(mallocx(1, 0)); + assert_false(tsd_fast(tsd), ""); + atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST); + + /* PHASE 3 */ + while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) { + } + assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); + /* Increase again, so that we can test multiple fast/slow changes. 
*/ + tsd_global_slow_inc(tsd_tsdn(tsd)); + atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST); + free(mallocx(1, 0)); + assert_false(tsd_fast(tsd), ""); + + /* PHASE 5 */ + while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) { + } + assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); + tsd_global_slow_dec(tsd_tsdn(tsd)); + atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST); + /* We only decreased once; things should still be slow. */ + free(mallocx(1, 0)); + assert_false(tsd_fast(tsd), ""); + + /* PHASE 7 */ + while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) { + } + assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); + tsd_global_slow_dec(tsd_tsdn(tsd)); + atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST); + /* We incremented and then decremented twice; we should be fast now. */ + free(mallocx(1, 0)); + assert_true(!originally_fast || tsd_fast(tsd), ""); + + /* PHASE 9 */ + while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) { + } + assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), ""); + + thd_join(thd, NULL); +} +TEST_END + +int +main(void) { + /* Ensure tsd bootstrapped. */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return test_status_fail; + } + + return test_no_reentrancy( + test_tsd_main_thread, + test_tsd_sub_thread, + test_tsd_reincarnation, + test_tsd_global_slow); +} diff --git a/deps/jemalloc/test/unit/witness.c b/deps/jemalloc/test/unit/witness.c new file mode 100644 index 0000000..5986da4 --- /dev/null +++ b/deps/jemalloc/test/unit/witness.c @@ -0,0 +1,280 @@ +#include "test/jemalloc_test.h" + +static witness_lock_error_t *witness_lock_error_orig; +static witness_owner_error_t *witness_owner_error_orig; +static witness_not_owner_error_t *witness_not_owner_error_orig; +static witness_depth_error_t *witness_depth_error_orig; + +static bool saw_lock_error; +static bool saw_owner_error; +static bool saw_not_owner_error; +static bool saw_depth_error; + +static void +witness_lock_error_intercept(const witness_list_t *witnesses, + const witness_t *witness) { + saw_lock_error = true; +} + +static void +witness_owner_error_intercept(const witness_t *witness) { + saw_owner_error = true; +} + +static void +witness_not_owner_error_intercept(const witness_t *witness) { + saw_not_owner_error = true; +} + +static void +witness_depth_error_intercept(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + saw_depth_error = true; +} + +static int +witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) { + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + assert(oa == (void *)a); + assert(ob == (void *)b); + + return strcmp(a->name, b->name); +} + +static int +witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, + void *ob) { + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + assert(oa == (void *)a); + assert(ob == (void *)b); + + return -strcmp(a->name, b->name); +} + +TEST_BEGIN(test_witness) { + witness_t a, b; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); + + witness_init(&a, "a", 1, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &a); + witness_lock(&witness_tsdn, &a); + witness_assert_owner(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_assert_depth_to_rank(&witness_tsdn, 
(witness_rank_t)1U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0); + + witness_init(&b, "b", 2, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &b); + witness_lock(&witness_tsdn, &b); + witness_assert_owner(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 2); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); + + witness_unlock(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); + witness_unlock(&witness_tsdn, &b); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); +} +TEST_END + +TEST_BEGIN(test_witness_comp) { + witness_t a, b, c, d; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, witness_comp, &a); + witness_assert_not_owner(&witness_tsdn, &a); + witness_lock(&witness_tsdn, &a); + witness_assert_owner(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + + witness_init(&b, "b", 1, witness_comp, &b); + witness_assert_not_owner(&witness_tsdn, &b); + witness_lock(&witness_tsdn, &b); + witness_assert_owner(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 2); + witness_unlock(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 1); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_init(&c, "c", 1, witness_comp_reverse, &c); + witness_assert_not_owner(&witness_tsdn, &c); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &c); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(&witness_tsdn, &c); + witness_assert_depth(&witness_tsdn, 1); + + saw_lock_error = false; + + witness_init(&d, "d", 1, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &d); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &d); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(&witness_tsdn, &d); + witness_assert_depth(&witness_tsdn, 1); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + + witness_lock_error = witness_lock_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_reversal) { + witness_t a, b; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + witness_init(&b, "b", 2, NULL, NULL); + + witness_lock(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 1); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + + witness_unlock(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_unlock(&witness_tsdn, &b); + + witness_assert_lockless(&witness_tsdn); + + witness_lock_error = witness_lock_error_orig; +} 
+TEST_END + +TEST_BEGIN(test_witness_recursive) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_not_owner_error_orig = witness_not_owner_error; + witness_not_owner_error = witness_not_owner_error_intercept; + saw_not_owner_error = false; + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + + witness_lock(&witness_tsdn, &a); + assert_false(saw_lock_error, "Unexpected witness lock error"); + assert_false(saw_not_owner_error, "Unexpected witness not owner error"); + witness_lock(&witness_tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + assert_true(saw_not_owner_error, "Expected witness not owner error"); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + + witness_owner_error = witness_owner_error_orig; + witness_lock_error = witness_lock_error_orig; + +} +TEST_END + +TEST_BEGIN(test_witness_unlock_not_owned) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_owner_error_orig = witness_owner_error; + witness_owner_error = witness_owner_error_intercept; + saw_owner_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + + assert_false(saw_owner_error, "Unexpected owner error"); + witness_unlock(&witness_tsdn, &a); + assert_true(saw_owner_error, "Expected owner error"); + + witness_assert_lockless(&witness_tsdn); + + witness_owner_error = witness_owner_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_depth) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_depth_error_orig = witness_depth_error; + witness_depth_error = witness_depth_error_intercept; + saw_depth_error = false; + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_init(&a, "a", 1, NULL, NULL); + + assert_false(saw_depth_error, "Unexpected depth error"); + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_lock(&witness_tsdn, &a); + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + assert_true(saw_depth_error, "Expected depth error"); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_depth_error = witness_depth_error_orig; +} +TEST_END + +int +main(void) { + return test( + test_witness, + test_witness_comp, + test_witness_reversal, + test_witness_recursive, + test_witness_unlock_not_owned, + test_witness_depth); +} diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c new file mode 100644 index 0000000..271fd5c --- /dev/null +++ b/deps/jemalloc/test/unit/zero.c @@ -0,0 +1,59 @@ +#include "test/jemalloc_test.h" + +static void +test_zero(size_t sz_min, size_t sz_max) { + uint8_t *s; + size_t sz_prev, sz, i; +#define MAGIC ((uint8_t)0x61) + + sz_prev = 0; + s = (uint8_t *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_u_eq(s[0], MAGIC, + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_u_eq(s[sz_prev-1], MAGIC, + "Previously allocated byte %zu/%zu 
is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + assert_u_eq(s[i], 0x0, + "Newly allocated byte %zu/%zu isn't zero-filled", + i, sz); + s[i] = MAGIC; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + s = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + } + } + + dallocx(s, 0); +#undef MAGIC +} + +TEST_BEGIN(test_zero_small) { + test_skip_if(!config_fill); + test_zero(1, SC_SMALL_MAXCLASS - 1); +} +TEST_END + +TEST_BEGIN(test_zero_large) { + test_skip_if(!config_fill); + test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1)); +} +TEST_END + +int +main(void) { + return test( + test_zero_small, + test_zero_large); +} diff --git a/deps/jemalloc/test/unit/zero.sh b/deps/jemalloc/test/unit/zero.sh new file mode 100644 index 0000000..b4540b2 --- /dev/null +++ b/deps/jemalloc/test/unit/zero.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,junk:false,zero:true" +fi diff --git a/deps/linenoise/.gitignore b/deps/linenoise/.gitignore new file mode 100644 index 0000000..7ab7825 --- /dev/null +++ b/deps/linenoise/.gitignore @@ -0,0 +1,3 @@ +linenoise_example +*.dSYM +history.txt diff --git a/deps/linenoise/Makefile b/deps/linenoise/Makefile new file mode 100644 index 0000000..1dd894b --- /dev/null +++ b/deps/linenoise/Makefile @@ -0,0 +1,21 @@ +STD= +WARN= -Wall +OPT= -Os + +R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) +R_LDFLAGS= $(LDFLAGS) +DEBUG= -g + +R_CC=$(CC) $(R_CFLAGS) +R_LD=$(CC) $(R_LDFLAGS) + +linenoise.o: linenoise.h linenoise.c + +linenoise_example: linenoise.o example.o + $(R_LD) -o $@ $^ + +.c.o: + $(R_CC) -c $< + +clean: + rm -f linenoise_example *.o diff --git a/deps/linenoise/README.markdown b/deps/linenoise/README.markdown new file mode 100644 index 0000000..1afea2a --- /dev/null +++ b/deps/linenoise/README.markdown @@ -0,0 +1,247 @@ +# Linenoise + +A minimal, zero-config, BSD licensed, readline replacement used in Redis, +MongoDB, and Android. + +* Single and multi line editing mode with the usual key bindings implemented. +* History handling. +* Completion. +* Hints (suggestions at the right of the prompt as you type). +* About 1,100 lines of BSD license source code. +* Only uses a subset of VT100 escapes (ANSI.SYS compatible). + +## Can a line editing library be 20k lines of code? + +Line editing with some support for history is a really important feature for command line utilities. Instead of retyping almost the same stuff again and again it's just much better to hit the up arrow and edit on syntax errors, or in order to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities to huge libraries just to get a minimal support for line editing? + +So what usually happens is either: + + * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh). + * Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance). + +The result is a pollution of binaries without line editing support. 
+
+So I spent more or less two hours doing a reality check resulting in this
+little library: is it *really* needed for a line editing library to be 20k
+lines of code? Apparently not, it is possible to get a very small, zero
+configuration, trivial to embed library, that solves the problem. Smaller
+programs will just include this, supporting line editing out of the box.
+Larger programs may use this little library, or just check with configure
+whether readline/libedit is available and resort to Linenoise if not.
+
+## Terminals, in 2010.
+
+Apparently almost every terminal you can happen to use today has some kind of
+support for basic VT100 escape sequences. So I tried to write a lib using just
+very basic VT100 features. The resulting library appears to work everywhere I
+tried to use it, and now can work even on ANSI.SYS compatible terminals, since
+no VT220 specific sequences are used anymore.
+
+The library is currently about 1100 lines of code. In order to use it in your
+project just look at the *example.c* file in the source distribution, it is
+trivial. Linenoise is BSD code, so you can use it both in free software and
+commercial software.
+
+## Tested with...
+
+ * Linux text only console ($TERM = linux)
+ * Linux KDE terminal application ($TERM = xterm)
+ * Linux xterm ($TERM = xterm)
+ * Linux Buildroot ($TERM = vt100)
+ * Mac OS X iTerm ($TERM = xterm)
+ * Mac OS X default Terminal.app ($TERM = xterm)
+ * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen)
+ * IBM AIX 6.1
+ * FreeBSD xterm ($TERM = xterm)
+ * ANSI.SYS
+ * Emacs comint mode ($TERM = dumb)
+
+Please test it everywhere you can and report back!
+
+## Let's push this forward!
+
+Patches should be provided respecting the Linenoise sensibility for small,
+easy to understand code.
+
+Send feedback to antirez at gmail
+
+# The API
+
+Linenoise is very easy to use, and reading the example shipped with the
+library should get you up to speed ASAP. Here is a list of API calls
+and how to use them.
+
+    char *linenoise(const char *prompt);
+
+This is the main Linenoise call: it shows the user a prompt with line editing
+and history capabilities. The prompt you specify is used as a prompt, that is,
+it will be printed to the left of the cursor. The library returns a buffer
+with the line composed by the user, or NULL on end of file or when there
+is an out of memory condition.
+
+When a tty is detected (the user is actually typing into a terminal session)
+the maximum editable line length is `LINENOISE_MAX_LINE`. When instead the
+standard input is not a tty, which happens every time you redirect a file
+to a program, or use it in a Unix pipeline, there are no limits to the
+length of the line that can be returned.
+
+The returned line should be freed with the `free()` standard system call.
+However sometimes it could happen that your program uses a different dynamic
+allocation library, so you may also use `linenoiseFree` to make sure the
+line is freed with the same allocator it was created with.
+
+The canonical loop used by a program using Linenoise will be something like
+this:
+
+    while((line = linenoise("hello> ")) != NULL) {
+        printf("You wrote: %s\n", line);
+        linenoiseFree(line); /* Or just free(line) if you use libc malloc. */
+    }
+
+## Single line VS multi line editing
+
+By default, Linenoise uses single line editing, that is, a single row on the
+screen will be used, and as the user types more, the text will scroll towards
+the left to make room.
+This works if your program is one where the user is unlikely to write a lot
+of text, otherwise multi line editing, where multiple screen rows are used,
+can be a lot more comfortable.
+
+In order to enable multi line editing use the following API call:
+
+    linenoiseSetMultiLine(1);
+
+You can disable it using `0` as the argument.
+
+## History
+
+Linenoise supports history, so that the user does not have to retype
+the same things again and again, but can use the down and up arrows in order
+to search and re-edit already inserted lines of text.
+
+The following are the history API calls:
+
+    int linenoiseHistoryAdd(const char *line);
+    int linenoiseHistorySetMaxLen(int len);
+    int linenoiseHistorySave(const char *filename);
+    int linenoiseHistoryLoad(const char *filename);
+
+Use `linenoiseHistoryAdd` every time you want to add a new element
+to the top of the history (it will be the first the user will see when
+using the up arrow).
+
+Note that for history to work, you have to set a length for the history
+(which is zero by default, so history will be disabled if you don't set
+a proper one). This is accomplished using the `linenoiseHistorySetMaxLen`
+function.
+
+Linenoise has direct support for persisting the history into a history
+file. The functions `linenoiseHistorySave` and `linenoiseHistoryLoad` do
+just that. Both functions return -1 on error and 0 on success.
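+
+Putting these calls together, a minimal sketch of a history-enabled prompt
+loop looks like the following (the `history.txt` filename and the length of
+100 are just illustrative values, not anything mandated by the library):
+
+    linenoiseHistorySetMaxLen(100);       /* Keep at most 100 entries. */
+    linenoiseHistoryLoad("history.txt");  /* OK if the file does not exist yet. */
+
+    char *line;
+    while((line = linenoise("hello> ")) != NULL) {
+        if (line[0] != '\0') {
+            linenoiseHistoryAdd(line);            /* Newest entry goes on top. */
+            linenoiseHistorySave("history.txt");  /* Persist after every line. */
+        }
+        linenoiseFree(line);
+    }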
+
+## Mask mode
+
+Sometimes it is useful to allow the user to type passwords or other
+secrets that should not be displayed. For such situations linenoise supports
+a "mask mode" that will just replace the characters the user is typing
+with `*` characters, like in the following example:
+
+    $ ./linenoise_example
+    hello> get mykey
+    echo: 'get mykey'
+    hello> /mask
+    hello> *********
+
+You can enable and disable mask mode using the following two functions:
+
+    void linenoiseMaskModeEnable(void);
+    void linenoiseMaskModeDisable(void);
+
+## Completion
+
+Linenoise supports completion, which is the ability to complete the user
+input when she or he presses the `<TAB>` key.
+
+In order to use completion, you need to register a completion callback, which
+is called every time the user presses `<TAB>`. Your callback will return a
+list of items that are completions for the current string.
+
+The following is an example of registering a completion callback:
+
+    linenoiseSetCompletionCallback(completion);
+
+The completion callback must be a function returning `void` and getting as
+input a `const char` pointer, which is the line the user has typed so far,
+and a `linenoiseCompletions` object pointer, which is used as argument of
+`linenoiseAddCompletion` in order to add completions inside the callback.
+An example will make it more clear:
+
+    void completion(const char *buf, linenoiseCompletions *lc) {
+        if (buf[0] == 'h') {
+            linenoiseAddCompletion(lc,"hello");
+            linenoiseAddCompletion(lc,"hello there");
+        }
+    }
+
+Basically in your completion callback, you inspect the input, and return
+a list of items that are good completions by using `linenoiseAddCompletion`.
+
+If you want to test the completion feature, compile the example program
+with `make`, run it, type `h` and press `<TAB>`.
+
+## Hints
+
+Linenoise has a feature called *hints* which is very useful when you
+use Linenoise in order to implement a REPL (Read Eval Print Loop) for
+a program that accepts commands and arguments, but may also be useful in
+other conditions.
+
+The feature shows, on the right of the cursor, as the user types, hints that
+may be useful. The hints can be displayed using a different color compared
+to the color the user is typing, and can also be bold.
+
+For example as the user starts to type `"git remote add"`, with hints it's
+possible to show on the right of the prompt a string ` <name> <url>`.
+
+The feature works similarly to the history feature, using a callback.
+To register the callback we use:
+
+    linenoiseSetHintsCallback(hints);
+
+The callback itself is implemented like this:
+
+    char *hints(const char *buf, int *color, int *bold) {
+        if (!strcasecmp(buf,"git remote add")) {
+            *color = 35;
+            *bold = 0;
+            return " <name> <url>";
+        }
+        return NULL;
+    }
+
+The callback function returns the string that should be displayed or NULL
+if no hint is available for the text the user currently typed. The returned
+string will be trimmed as needed depending on the number of columns available
+on the screen.
+
+It is possible to return a dynamically allocated string, by also registering
+a function to deallocate the hint string once used:
+
+    void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *);
+
+The free hint callback will just receive the pointer and free the string
+as needed (depending on how the hints callback allocated it).
+
+As you can see in the example above, a `color` (in xterm color terminal codes)
+can be provided together with a `bold` attribute. If no color is set, the
+current terminal foreground color is used. If no bold attribute is set,
+non-bold text is printed.
+
+Color codes are:
+
+    red = 31
+    green = 32
+    yellow = 33
+    blue = 34
+    magenta = 35
+    cyan = 36
+    white = 37
+
+## Screen handling
+
+Sometimes you may want to clear the screen as a result of something the
+user typed. You can do this by calling the following function:
+
+    void linenoiseClearScreen(void);
+
+## Related projects
+
+* [Linenoise NG](https://github.com/arangodb/linenoise-ng) is a fork of Linenoise that aims to add more advanced features like UTF-8 support, Windows support and other features. Uses C++ instead of C as development language.
+* [Linenoise-swift](https://github.com/andybest/linenoise-swift) is a reimplementation of Linenoise written in Swift.
diff --git a/deps/linenoise/example.c b/deps/linenoise/example.c
new file mode 100644
index 0000000..74358c3
--- /dev/null
+++ b/deps/linenoise/example.c
@@ -0,0 +1,79 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "linenoise.h"
+
+
+void completion(const char *buf, linenoiseCompletions *lc) {
+    if (buf[0] == 'h') {
+        linenoiseAddCompletion(lc,"hello");
+        linenoiseAddCompletion(lc,"hello there");
+    }
+}
+
+char *hints(const char *buf, int *color, int *bold) {
+    if (!strcasecmp(buf,"hello")) {
+        *color = 35;
+        *bold = 0;
+        return " World";
+    }
+    return NULL;
+}
+
+int main(int argc, char **argv) {
+    char *line;
+    char *prgname = argv[0];
+
+    /* Parse options, with --multiline we enable multi line editing. */
+    while(argc > 1) {
+        argc--;
+        argv++;
+        if (!strcmp(*argv,"--multiline")) {
+            linenoiseSetMultiLine(1);
+            printf("Multi-line mode enabled.\n");
+        } else if (!strcmp(*argv,"--keycodes")) {
+            linenoisePrintKeyCodes();
+            exit(0);
+        } else {
+            fprintf(stderr, "Usage: %s [--multiline] [--keycodes]\n", prgname);
+            exit(1);
+        }
+    }
+
+    /* Set the completion callback. This will be called every time the
+     * user uses the <tab> key. */
+    linenoiseSetCompletionCallback(completion);
+    linenoiseSetHintsCallback(hints);
+
+    /* Load history from file. The history file is just a plain text file
+     * where entries are separated by newlines.
*/ + linenoiseHistoryLoad("history.txt"); /* Load the history at startup */ + + /* Now this is the main loop of the typical linenoise-based application. + * The call to linenoise() will block as long as the user types something + * and presses enter. + * + * The typed string is returned as a malloc() allocated string by + * linenoise, so the user needs to free() it. */ + + while((line = linenoise("hello> ")) != NULL) { + /* Do something with the string. */ + if (line[0] != '\0' && line[0] != '/') { + printf("echo: '%s'\n", line); + linenoiseHistoryAdd(line); /* Add to the history. */ + linenoiseHistorySave("history.txt"); /* Save the history on disk. */ + } else if (!strncmp(line,"/historylen",11)) { + /* The "/historylen" command will change the history len. */ + int len = atoi(line+11); + linenoiseHistorySetMaxLen(len); + } else if (!strncmp(line, "/mask", 5)) { + linenoiseMaskModeEnable(); + } else if (!strncmp(line, "/unmask", 7)) { + linenoiseMaskModeDisable(); + } else if (line[0] == '/') { + printf("Unreconized command: %s\n", line); + } + free(line); + } + return 0; +} diff --git a/deps/linenoise/linenoise.c b/deps/linenoise/linenoise.c new file mode 100644 index 0000000..1b01622 --- /dev/null +++ b/deps/linenoise/linenoise.c @@ -0,0 +1,1227 @@ +/* linenoise.c -- guerrilla line editing library against the idea that a + * line editing lib needs to be 20,000 lines of C code. + * + * You can find the latest source code at: + * + * http://github.com/antirez/linenoise + * + * Does a number of crazy assumptions that happen to be true in 99.9999% of + * the 2010 UNIX computers around. + * + * ------------------------------------------------------------------------ + * + * Copyright (c) 2010-2016, Salvatore Sanfilippo + * Copyright (c) 2010-2013, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * ------------------------------------------------------------------------ + * + * References: + * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html + * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html + * + * Todo list: + * - Filter bogus Ctrl+ combinations. + * - Win32 support + * + * Bloat: + * - History search like Ctrl+r in readline? 
+ * + * List of escape sequences used by this program, we do everything just + * with three sequences. In order to be so cheap we may have some + * flickering effect with some slow terminal, but the lesser sequences + * the more compatible. + * + * EL (Erase Line) + * Sequence: ESC [ n K + * Effect: if n is 0 or missing, clear from cursor to end of line + * Effect: if n is 1, clear from beginning of line to cursor + * Effect: if n is 2, clear entire line + * + * CUF (CUrsor Forward) + * Sequence: ESC [ n C + * Effect: moves cursor forward n chars + * + * CUB (CUrsor Backward) + * Sequence: ESC [ n D + * Effect: moves cursor backward n chars + * + * The following is used to get the terminal width if getting + * the width with the TIOCGWINSZ ioctl fails + * + * DSR (Device Status Report) + * Sequence: ESC [ 6 n + * Effect: reports the current cusor position as ESC [ n ; m R + * where n is the row and m is the column + * + * When multi line mode is enabled, we also use an additional escape + * sequence. However multi line editing is disabled by default. + * + * CUU (Cursor Up) + * Sequence: ESC [ n A + * Effect: moves cursor up of n chars. + * + * CUD (Cursor Down) + * Sequence: ESC [ n B + * Effect: moves cursor down of n chars. + * + * When linenoiseClearScreen() is called, two additional escape sequences + * are used in order to clear the screen and position the cursor at home + * position. + * + * CUP (Cursor position) + * Sequence: ESC [ H + * Effect: moves the cursor to upper left corner + * + * ED (Erase display) + * Sequence: ESC [ 2 J + * Effect: clear the whole screen + * + */ + +#define _DEFAULT_SOURCE /* For fchmod() */ +#define _BSD_SOURCE /* For fchmod() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linenoise.h" + +#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 +#define LINENOISE_MAX_LINE 4096 +static char *unsupported_term[] = {"dumb","cons25","emacs",NULL}; +static linenoiseCompletionCallback *completionCallback = NULL; +static linenoiseHintsCallback *hintsCallback = NULL; +static linenoiseFreeHintsCallback *freeHintsCallback = NULL; + +static struct termios orig_termios; /* In order to restore at exit.*/ +static int maskmode = 0; /* Show "***" instead of input. For passwords. */ +static int rawmode = 0; /* For atexit() function to check if restore is needed*/ +static int mlmode = 0; /* Multi line mode. Default is single line. */ +static int atexit_registered = 0; /* Register atexit just 1 time. */ +static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN; +static int history_len = 0; +static char **history = NULL; + +/* The linenoiseState structure represents the state during line editing. + * We pass this state to functions implementing specific editing + * functionalities. */ +struct linenoiseState { + int ifd; /* Terminal stdin file descriptor. */ + int ofd; /* Terminal stdout file descriptor. */ + char *buf; /* Edited line buffer. */ + size_t buflen; /* Edited line buffer size. */ + const char *prompt; /* Prompt to display. */ + size_t plen; /* Prompt length. */ + size_t pos; /* Current cursor position. */ + size_t oldpos; /* Previous refresh cursor position. */ + size_t len; /* Current edited line length. */ + size_t cols; /* Number of columns in terminal. */ + size_t maxrows; /* Maximum num of rows used so far (multiline mode) */ + int history_index; /* The history index we are currently editing. 
*/ +}; + +enum KEY_ACTION{ + KEY_NULL = 0, /* NULL */ + CTRL_A = 1, /* Ctrl+a */ + CTRL_B = 2, /* Ctrl-b */ + CTRL_C = 3, /* Ctrl-c */ + CTRL_D = 4, /* Ctrl-d */ + CTRL_E = 5, /* Ctrl-e */ + CTRL_F = 6, /* Ctrl-f */ + CTRL_H = 8, /* Ctrl-h */ + TAB = 9, /* Tab */ + CTRL_K = 11, /* Ctrl+k */ + CTRL_L = 12, /* Ctrl+l */ + ENTER = 13, /* Enter */ + CTRL_N = 14, /* Ctrl-n */ + CTRL_P = 16, /* Ctrl-p */ + CTRL_T = 20, /* Ctrl-t */ + CTRL_U = 21, /* Ctrl+u */ + CTRL_W = 23, /* Ctrl+w */ + ESC = 27, /* Escape */ + BACKSPACE = 127 /* Backspace */ +}; + +static void linenoiseAtExit(void); +int linenoiseHistoryAdd(const char *line); +static void refreshLine(struct linenoiseState *l); + +/* Debugging macro. */ +#if 0 +FILE *lndebug_fp = NULL; +#define lndebug(...) \ + do { \ + if (lndebug_fp == NULL) { \ + lndebug_fp = fopen("/tmp/lndebug.txt","a"); \ + fprintf(lndebug_fp, \ + "[%d %d %d] p: %d, rows: %d, rpos: %d, max: %d, oldmax: %d\n", \ + (int)l->len,(int)l->pos,(int)l->oldpos,plen,rows,rpos, \ + (int)l->maxrows,old_rows); \ + } \ + fprintf(lndebug_fp, ", " __VA_ARGS__); \ + fflush(lndebug_fp); \ + } while (0) +#else +#define lndebug(fmt, ...) +#endif + +/* ======================= Low level terminal handling ====================== */ + +/* Enable "mask mode". When it is enabled, instead of the input that + * the user is typing, the terminal will just display a corresponding + * number of asterisks, like "****". This is useful for passwords and other + * secrets that should not be displayed. */ +void linenoiseMaskModeEnable(void) { + maskmode = 1; +} + +/* Disable mask mode. */ +void linenoiseMaskModeDisable(void) { + maskmode = 0; +} + +/* Set if to use or not the multi line mode. */ +void linenoiseSetMultiLine(int ml) { + mlmode = ml; +} + +/* Return true if the terminal name is in the list of terminals we know are + * not able to understand basic escape sequences. */ +static int isUnsupportedTerm(void) { + char *term = getenv("TERM"); + int j; + + if (term == NULL) return 0; + for (j = 0; unsupported_term[j]; j++) + if (!strcasecmp(term,unsupported_term[j])) return 1; + return 0; +} + +/* Raw mode: 1960 magic shit. */ +static int enableRawMode(int fd) { + struct termios raw; + + if (!isatty(STDIN_FILENO)) goto fatal; + if (!atexit_registered) { + atexit(linenoiseAtExit); + atexit_registered = 1; + } + if (tcgetattr(fd,&orig_termios) == -1) goto fatal; + + raw = orig_termios; /* modify the original mode */ + /* input modes: no break, no CR to NL, no parity check, no strip char, + * no start/stop output control. */ + raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); + /* output modes - disable post processing */ + raw.c_oflag &= ~(OPOST); + /* control modes - set 8 bit chars */ + raw.c_cflag |= (CS8); + /* local modes - choing off, canonical off, no extended functions, + * no signal chars (^Z,^C) */ + raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); + /* control chars - set return condition: min number of bytes and timer. + * We want read to return every single byte, without timeout. */ + raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ + + /* put terminal in raw mode after flushing */ + if (tcsetattr(fd,TCSAFLUSH,&raw) < 0) goto fatal; + rawmode = 1; + return 0; + +fatal: + errno = ENOTTY; + return -1; +} + +static void disableRawMode(int fd) { + /* Don't even check the return value as it's too late. 
*/ + if (rawmode && tcsetattr(fd,TCSAFLUSH,&orig_termios) != -1) + rawmode = 0; +} + +/* Use the ESC [6n escape sequence to query the horizontal cursor position + * and return it. On error -1 is returned, on success the position of the + * cursor. */ +static int getCursorPosition(int ifd, int ofd) { + char buf[32]; + int cols, rows; + unsigned int i = 0; + + /* Report cursor location */ + if (write(ofd, "\x1b[6n", 4) != 4) return -1; + + /* Read the response: ESC [ rows ; cols R */ + while (i < sizeof(buf)-1) { + if (read(ifd,buf+i,1) != 1) break; + if (buf[i] == 'R') break; + i++; + } + buf[i] = '\0'; + + /* Parse it. */ + if (buf[0] != ESC || buf[1] != '[') return -1; + if (sscanf(buf+2,"%d;%d",&rows,&cols) != 2) return -1; + return cols; +} + +/* Try to get the number of columns in the current terminal, or assume 80 + * if it fails. */ +static int getColumns(int ifd, int ofd) { + struct winsize ws; + + if (ioctl(1, TIOCGWINSZ, &ws) == -1 || ws.ws_col == 0) { + /* ioctl() failed. Try to query the terminal itself. */ + int start, cols; + + /* Get the initial position so we can restore it later. */ + start = getCursorPosition(ifd,ofd); + if (start == -1) goto failed; + + /* Go to right margin and get position. */ + if (write(ofd,"\x1b[999C",6) != 6) goto failed; + cols = getCursorPosition(ifd,ofd); + if (cols == -1) goto failed; + + /* Restore position. */ + if (cols > start) { + char seq[32]; + snprintf(seq,32,"\x1b[%dD",cols-start); + if (write(ofd,seq,strlen(seq)) == -1) { + /* Can't recover... */ + } + } + return cols; + } else { + return ws.ws_col; + } + +failed: + return 80; +} + +/* Clear the screen. Used to handle ctrl+l */ +void linenoiseClearScreen(void) { + if (write(STDOUT_FILENO,"\x1b[H\x1b[2J",7) <= 0) { + /* nothing to do, just to avoid warning. */ + } +} + +/* Beep, used for completion when there is nothing to complete or when all + * the choices were already shown. */ +static void linenoiseBeep(void) { + fprintf(stderr, "\x7"); + fflush(stderr); +} + +/* ============================== Completion ================================ */ + +/* Free a list of completion option populated by linenoiseAddCompletion(). */ +static void freeCompletions(linenoiseCompletions *lc) { + size_t i; + for (i = 0; i < lc->len; i++) + free(lc->cvec[i]); + if (lc->cvec != NULL) + free(lc->cvec); +} + +/* This is an helper function for linenoiseEdit() and is called when the + * user types the key in order to complete the string currently in the + * input. + * + * The state of the editing is encapsulated into the pointed linenoiseState + * structure as described in the structure definition. 
*/ +static int completeLine(struct linenoiseState *ls) { + linenoiseCompletions lc = { 0, NULL }; + int nread, nwritten; + char c = 0; + + completionCallback(ls->buf,&lc); + if (lc.len == 0) { + linenoiseBeep(); + } else { + size_t stop = 0, i = 0; + + while(!stop) { + /* Show completion or original buffer */ + if (i < lc.len) { + struct linenoiseState saved = *ls; + + ls->len = ls->pos = strlen(lc.cvec[i]); + ls->buf = lc.cvec[i]; + refreshLine(ls); + ls->len = saved.len; + ls->pos = saved.pos; + ls->buf = saved.buf; + } else { + refreshLine(ls); + } + + nread = read(ls->ifd,&c,1); + if (nread <= 0) { + freeCompletions(&lc); + return -1; + } + + switch(c) { + case 9: /* tab */ + i = (i+1) % (lc.len+1); + if (i == lc.len) linenoiseBeep(); + break; + case 27: /* escape */ + /* Re-show original buffer */ + if (i < lc.len) refreshLine(ls); + stop = 1; + break; + default: + /* Update buffer and return */ + if (i < lc.len) { + nwritten = snprintf(ls->buf,ls->buflen,"%s",lc.cvec[i]); + ls->len = ls->pos = nwritten; + } + stop = 1; + break; + } + } + } + + freeCompletions(&lc); + return c; /* Return last read character */ +} + +/* Register a callback function to be called for tab-completion. */ +void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) { + completionCallback = fn; +} + +/* Register a hits function to be called to show hits to the user at the + * right of the prompt. */ +void linenoiseSetHintsCallback(linenoiseHintsCallback *fn) { + hintsCallback = fn; +} + +/* Register a function to free the hints returned by the hints callback + * registered with linenoiseSetHintsCallback(). */ +void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *fn) { + freeHintsCallback = fn; +} + +/* This function is used by the callback function registered by the user + * in order to add completion options given the input string when the + * user typed . See the example.c source code for a very easy to + * understand example. */ +void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { + size_t len = strlen(str); + char *copy, **cvec; + + copy = malloc(len+1); + if (copy == NULL) return; + memcpy(copy,str,len+1); + cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1)); + if (cvec == NULL) { + free(copy); + return; + } + lc->cvec = cvec; + lc->cvec[lc->len++] = copy; +} + +/* =========================== Line editing ================================= */ + +/* We define a very simple "append buffer" structure, that is an heap + * allocated string where we can append to. This is useful in order to + * write all the escape sequences in a buffer and flush them to the standard + * output in a single call, to avoid flickering effects. */ +struct abuf { + char *b; + int len; +}; + +static void abInit(struct abuf *ab) { + ab->b = NULL; + ab->len = 0; +} + +static void abAppend(struct abuf *ab, const char *s, int len) { + char *new = realloc(ab->b,ab->len+len); + + if (new == NULL) return; + memcpy(new+ab->len,s,len); + ab->b = new; + ab->len += len; +} + +static void abFree(struct abuf *ab) { + free(ab->b); +} + +/* Helper of refreshSingleLine() and refreshMultiLine() to show hints + * to the right of the prompt. 
*/ +void refreshShowHints(struct abuf *ab, struct linenoiseState *l, int plen) { + char seq[64]; + if (hintsCallback && plen+l->len < l->cols) { + int color = -1, bold = 0; + char *hint = hintsCallback(l->buf,&color,&bold); + if (hint) { + int hintlen = strlen(hint); + int hintmaxlen = l->cols-(plen+l->len); + if (hintlen > hintmaxlen) hintlen = hintmaxlen; + if (bold == 1 && color == -1) color = 37; + if (color != -1 || bold != 0) + snprintf(seq,64,"\033[%d;%d;49m",bold,color); + else + seq[0] = '\0'; + abAppend(ab,seq,strlen(seq)); + abAppend(ab,hint,hintlen); + if (color != -1 || bold != 0) + abAppend(ab,"\033[0m",4); + /* Call the function to free the hint returned. */ + if (freeHintsCallback) freeHintsCallback(hint); + } + } +} + +/* Single line low level line refresh. + * + * Rewrite the currently edited line accordingly to the buffer content, + * cursor position, and number of columns of the terminal. */ +static void refreshSingleLine(struct linenoiseState *l) { + char seq[64]; + size_t plen = strlen(l->prompt); + int fd = l->ofd; + char *buf = l->buf; + size_t len = l->len; + size_t pos = l->pos; + struct abuf ab; + + while((plen+pos) >= l->cols) { + buf++; + len--; + pos--; + } + while (plen+len > l->cols) { + len--; + } + + abInit(&ab); + /* Cursor to left edge */ + snprintf(seq,64,"\r"); + abAppend(&ab,seq,strlen(seq)); + /* Write the prompt and the current buffer content */ + abAppend(&ab,l->prompt,strlen(l->prompt)); + if (maskmode == 1) { + while (len--) abAppend(&ab,"*",1); + } else { + abAppend(&ab,buf,len); + } + /* Show hits if any. */ + refreshShowHints(&ab,l,plen); + /* Erase to right */ + snprintf(seq,64,"\x1b[0K"); + abAppend(&ab,seq,strlen(seq)); + /* Move cursor to original position. */ + snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen)); + abAppend(&ab,seq,strlen(seq)); + if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ + abFree(&ab); +} + +/* Multi line low level line refresh. + * + * Rewrite the currently edited line accordingly to the buffer content, + * cursor position, and number of columns of the terminal. */ +static void refreshMultiLine(struct linenoiseState *l) { + char seq[64]; + int plen = strlen(l->prompt); + int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */ + int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */ + int rpos2; /* rpos after refresh. */ + int col; /* colum position, zero-based. */ + int old_rows = l->maxrows; + int fd = l->ofd, j; + struct abuf ab; + + /* Update maxrows if needed. */ + if (rows > (int)l->maxrows) l->maxrows = rows; + + /* First step: clear all the lines used before. To do so start by + * going to the last row. */ + abInit(&ab); + if (old_rows-rpos > 0) { + lndebug("go down %d", old_rows-rpos); + snprintf(seq,64,"\x1b[%dB", old_rows-rpos); + abAppend(&ab,seq,strlen(seq)); + } + + /* Now for every row clear it, go up. */ + for (j = 0; j < old_rows-1; j++) { + lndebug("clear+up"); + snprintf(seq,64,"\r\x1b[0K\x1b[1A"); + abAppend(&ab,seq,strlen(seq)); + } + + /* Clean the top line. */ + lndebug("clear"); + snprintf(seq,64,"\r\x1b[0K"); + abAppend(&ab,seq,strlen(seq)); + + /* Write the prompt and the current buffer content */ + abAppend(&ab,l->prompt,strlen(l->prompt)); + if (maskmode == 1) { + unsigned int i; + for (i = 0; i < l->len; i++) abAppend(&ab,"*",1); + } else { + abAppend(&ab,l->buf,l->len); + } + + /* Show hits if any. 
*/ + refreshShowHints(&ab,l,plen); + + /* If we are at the very end of the screen with our prompt, we need to + * emit a newline and move the prompt to the first column. */ + if (l->pos && + l->pos == l->len && + (l->pos+plen) % l->cols == 0) + { + lndebug(""); + abAppend(&ab,"\n",1); + snprintf(seq,64,"\r"); + abAppend(&ab,seq,strlen(seq)); + rows++; + if (rows > (int)l->maxrows) l->maxrows = rows; + } + + /* Move cursor to right position. */ + rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ + lndebug("rpos2 %d", rpos2); + + /* Go up till we reach the expected position. */ + if (rows-rpos2 > 0) { + lndebug("go-up %d", rows-rpos2); + snprintf(seq,64,"\x1b[%dA", rows-rpos2); + abAppend(&ab,seq,strlen(seq)); + } + + /* Set column. */ + col = (plen+(int)l->pos) % (int)l->cols; + lndebug("set col %d", 1+col); + if (col) + snprintf(seq,64,"\r\x1b[%dC", col); + else + snprintf(seq,64,"\r"); + abAppend(&ab,seq,strlen(seq)); + + lndebug("\n"); + l->oldpos = l->pos; + + if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ + abFree(&ab); +} + +/* Calls the two low level functions refreshSingleLine() or + * refreshMultiLine() according to the selected mode. */ +static void refreshLine(struct linenoiseState *l) { + if (mlmode) + refreshMultiLine(l); + else + refreshSingleLine(l); +} + +/* Insert the character 'c' at cursor current position. + * + * On error writing to the terminal -1 is returned, otherwise 0. */ +int linenoiseEditInsert(struct linenoiseState *l, char c) { + if (l->len < l->buflen) { + if (l->len == l->pos) { + l->buf[l->pos] = c; + l->pos++; + l->len++; + l->buf[l->len] = '\0'; + if ((!mlmode && l->plen+l->len < l->cols && !hintsCallback)) { + /* Avoid a full update of the line in the + * trivial case. */ + char d = (maskmode==1) ? '*' : c; + if (write(l->ofd,&d,1) == -1) return -1; + } else { + refreshLine(l); + } + } else { + memmove(l->buf+l->pos+1,l->buf+l->pos,l->len-l->pos); + l->buf[l->pos] = c; + l->len++; + l->pos++; + l->buf[l->len] = '\0'; + refreshLine(l); + } + } + return 0; +} + +/* Move cursor on the left. */ +void linenoiseEditMoveLeft(struct linenoiseState *l) { + if (l->pos > 0) { + l->pos--; + refreshLine(l); + } +} + +/* Move cursor on the right. */ +void linenoiseEditMoveRight(struct linenoiseState *l) { + if (l->pos != l->len) { + l->pos++; + refreshLine(l); + } +} + +/* Move cursor to the start of the line. */ +void linenoiseEditMoveHome(struct linenoiseState *l) { + if (l->pos != 0) { + l->pos = 0; + refreshLine(l); + } +} + +/* Move cursor to the end of the line. */ +void linenoiseEditMoveEnd(struct linenoiseState *l) { + if (l->pos != l->len) { + l->pos = l->len; + refreshLine(l); + } +} + +/* Substitute the currently edited line with the next or previous history + * entry as specified by 'dir'. */ +#define LINENOISE_HISTORY_NEXT 0 +#define LINENOISE_HISTORY_PREV 1 +void linenoiseEditHistoryNext(struct linenoiseState *l, int dir) { + if (history_len > 1) { + /* Update the current history entry before to + * overwrite it with the next one. */ + free(history[history_len - 1 - l->history_index]); + history[history_len - 1 - l->history_index] = strdup(l->buf); + /* Show the new entry */ + l->history_index += (dir == LINENOISE_HISTORY_PREV) ? 
1 : -1; + if (l->history_index < 0) { + l->history_index = 0; + return; + } else if (l->history_index >= history_len) { + l->history_index = history_len-1; + return; + } + strncpy(l->buf,history[history_len - 1 - l->history_index],l->buflen); + l->buf[l->buflen-1] = '\0'; + l->len = l->pos = strlen(l->buf); + refreshLine(l); + } +} + +/* Delete the character at the right of the cursor without altering the cursor + * position. Basically this is what happens with the "Delete" keyboard key. */ +void linenoiseEditDelete(struct linenoiseState *l) { + if (l->len > 0 && l->pos < l->len) { + memmove(l->buf+l->pos,l->buf+l->pos+1,l->len-l->pos-1); + l->len--; + l->buf[l->len] = '\0'; + refreshLine(l); + } +} + +/* Backspace implementation. */ +void linenoiseEditBackspace(struct linenoiseState *l) { + if (l->pos > 0 && l->len > 0) { + memmove(l->buf+l->pos-1,l->buf+l->pos,l->len-l->pos); + l->pos--; + l->len--; + l->buf[l->len] = '\0'; + refreshLine(l); + } +} + +/* Delete the previous word, maintaining the cursor at the start of the + * current word. */ +void linenoiseEditDeletePrevWord(struct linenoiseState *l) { + size_t old_pos = l->pos; + size_t diff; + + while (l->pos > 0 && l->buf[l->pos-1] == ' ') + l->pos--; + while (l->pos > 0 && l->buf[l->pos-1] != ' ') + l->pos--; + diff = old_pos - l->pos; + memmove(l->buf+l->pos,l->buf+old_pos,l->len-old_pos+1); + l->len -= diff; + refreshLine(l); +} + +/* This function is the core of the line editing capability of linenoise. + * It expects 'fd' to be already in "raw mode" so that every key pressed + * will be returned ASAP to read(). + * + * The resulting string is put into 'buf' when the user type enter, or + * when ctrl+d is typed. + * + * The function returns the length of the current buffer. */ +static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, const char *prompt) +{ + struct linenoiseState l; + + /* Populate the linenoise state that we pass to functions implementing + * specific editing functionalities. */ + l.ifd = stdin_fd; + l.ofd = stdout_fd; + l.buf = buf; + l.buflen = buflen; + l.prompt = prompt; + l.plen = strlen(prompt); + l.oldpos = l.pos = 0; + l.len = 0; + l.cols = getColumns(stdin_fd, stdout_fd); + l.maxrows = 0; + l.history_index = 0; + + /* Buffer starts empty. */ + l.buf[0] = '\0'; + l.buflen--; /* Make sure there is always space for the nulterm */ + + /* The latest history entry is always our current buffer, that + * initially is just an empty string. */ + linenoiseHistoryAdd(""); + + if (write(l.ofd,prompt,l.plen) == -1) return -1; + while(1) { + char c; + int nread; + char seq[3]; + + nread = read(l.ifd,&c,1); + if (nread <= 0) return l.len; + + /* Only autocomplete when the callback is set. It returns < 0 when + * there was an error reading from fd. Otherwise it will return the + * character that should be handled next. */ + if (c == 9 && completionCallback != NULL) { + c = completeLine(&l); + /* Return on errors */ + if (c < 0) return l.len; + /* Read next character when 0 */ + if (c == 0) continue; + } + + switch(c) { + case ENTER: /* enter */ + history_len--; + free(history[history_len]); + if (mlmode) linenoiseEditMoveEnd(&l); + if (hintsCallback) { + /* Force a refresh without hints to leave the previous + * line as the user typed it after a newline. 
*/ + linenoiseHintsCallback *hc = hintsCallback; + hintsCallback = NULL; + refreshLine(&l); + hintsCallback = hc; + } + return (int)l.len; + case CTRL_C: /* ctrl-c */ + errno = EAGAIN; + return -1; + case BACKSPACE: /* backspace */ + case 8: /* ctrl-h */ + linenoiseEditBackspace(&l); + break; + case CTRL_D: /* ctrl-d, remove char at right of cursor, or if the + line is empty, act as end-of-file. */ + if (l.len > 0) { + linenoiseEditDelete(&l); + } else { + history_len--; + free(history[history_len]); + return -1; + } + break; + case CTRL_T: /* ctrl-t, swaps current character with previous. */ + if (l.pos > 0 && l.pos < l.len) { + int aux = buf[l.pos-1]; + buf[l.pos-1] = buf[l.pos]; + buf[l.pos] = aux; + if (l.pos != l.len-1) l.pos++; + refreshLine(&l); + } + break; + case CTRL_B: /* ctrl-b */ + linenoiseEditMoveLeft(&l); + break; + case CTRL_F: /* ctrl-f */ + linenoiseEditMoveRight(&l); + break; + case CTRL_P: /* ctrl-p */ + linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); + break; + case CTRL_N: /* ctrl-n */ + linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); + break; + case ESC: /* escape sequence */ + /* Read the next two bytes representing the escape sequence. + * Use two calls to handle slow terminals returning the two + * chars at different times. */ + if (read(l.ifd,seq,1) == -1) break; + if (read(l.ifd,seq+1,1) == -1) break; + + /* ESC [ sequences. */ + if (seq[0] == '[') { + if (seq[1] >= '0' && seq[1] <= '9') { + /* Extended escape, read additional byte. */ + if (read(l.ifd,seq+2,1) == -1) break; + if (seq[2] == '~') { + switch(seq[1]) { + case '3': /* Delete key. */ + linenoiseEditDelete(&l); + break; + } + } + } else { + switch(seq[1]) { + case 'A': /* Up */ + linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); + break; + case 'B': /* Down */ + linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); + break; + case 'C': /* Right */ + linenoiseEditMoveRight(&l); + break; + case 'D': /* Left */ + linenoiseEditMoveLeft(&l); + break; + case 'H': /* Home */ + linenoiseEditMoveHome(&l); + break; + case 'F': /* End*/ + linenoiseEditMoveEnd(&l); + break; + } + } + } + + /* ESC O sequences. */ + else if (seq[0] == 'O') { + switch(seq[1]) { + case 'H': /* Home */ + linenoiseEditMoveHome(&l); + break; + case 'F': /* End*/ + linenoiseEditMoveEnd(&l); + break; + } + } + break; + default: + if (linenoiseEditInsert(&l,c)) return -1; + break; + case CTRL_U: /* Ctrl+u, delete the whole line. */ + buf[0] = '\0'; + l.pos = l.len = 0; + refreshLine(&l); + break; + case CTRL_K: /* Ctrl+k, delete from current to end of line. */ + buf[l.pos] = '\0'; + l.len = l.pos; + refreshLine(&l); + break; + case CTRL_A: /* Ctrl+a, go to the start of the line */ + linenoiseEditMoveHome(&l); + break; + case CTRL_E: /* ctrl+e, go to the end of the line */ + linenoiseEditMoveEnd(&l); + break; + case CTRL_L: /* ctrl+l, clear screen */ + linenoiseClearScreen(); + refreshLine(&l); + break; + case CTRL_W: /* ctrl+w, delete previous word */ + linenoiseEditDeletePrevWord(&l); + break; + } + } + return l.len; +} + +/* This special mode is used by linenoise in order to print scan codes + * on screen for debugging / development purposes. It is implemented + * by the linenoise_example program using the --keycodes option. */ +void linenoisePrintKeyCodes(void) { + char quit[4]; + + printf("Linenoise key codes debugging mode.\n" + "Press keys to see scan codes. 
Type 'quit' at any time to exit.\n"); + if (enableRawMode(STDIN_FILENO) == -1) return; + memset(quit,' ',4); + while(1) { + char c; + int nread; + + nread = read(STDIN_FILENO,&c,1); + if (nread <= 0) continue; + memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */ + quit[sizeof(quit)-1] = c; /* Insert current char on the right. */ + if (memcmp(quit,"quit",sizeof(quit)) == 0) break; + + printf("'%c' %02x (%d) (type quit to exit)\n", + isprint(c) ? c : '?', (int)c, (int)c); + printf("\r"); /* Go left edge manually, we are in raw mode. */ + fflush(stdout); + } + disableRawMode(STDIN_FILENO); +} + +/* This function calls the line editing function linenoiseEdit() using + * the STDIN file descriptor set in raw mode. */ +static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) { + int count; + + if (buflen == 0) { + errno = EINVAL; + return -1; + } + + if (enableRawMode(STDIN_FILENO) == -1) return -1; + count = linenoiseEdit(STDIN_FILENO, STDOUT_FILENO, buf, buflen, prompt); + disableRawMode(STDIN_FILENO); + printf("\n"); + return count; +} + +/* This function is called when linenoise() is called with the standard + * input file descriptor not attached to a TTY. So for example when the + * program using linenoise is called in pipe or with a file redirected + * to its standard input. In this case, we want to be able to return the + * line regardless of its length (by default we are limited to 4k). */ +static char *linenoiseNoTTY(void) { + char *line = NULL; + size_t len = 0, maxlen = 0; + + while(1) { + if (len == maxlen) { + if (maxlen == 0) maxlen = 16; + maxlen *= 2; + char *oldval = line; + line = realloc(line,maxlen); + if (line == NULL) { + if (oldval) free(oldval); + return NULL; + } + } + int c = fgetc(stdin); + if (c == EOF || c == '\n') { + if (c == EOF && len == 0) { + free(line); + return NULL; + } else { + line[len] = '\0'; + return line; + } + } else { + line[len] = c; + len++; + } + } +} + +/* The high level function that is the main API of the linenoise library. + * This function checks if the terminal has basic capabilities, just checking + * for a blacklist of stupid terminals, and later either calls the line + * editing function or uses dummy fgets() so that you will be able to type + * something even in the most desperate of the conditions. */ +char *linenoise(const char *prompt) { + char buf[LINENOISE_MAX_LINE]; + int count; + + if (!isatty(STDIN_FILENO)) { + /* Not a tty: read from file / pipe. In this mode we don't want any + * limit to the line size, so we call a function to handle that. */ + return linenoiseNoTTY(); + } else if (isUnsupportedTerm()) { + size_t len; + + printf("%s",prompt); + fflush(stdout); + if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL; + len = strlen(buf); + while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) { + len--; + buf[len] = '\0'; + } + return strdup(buf); + } else { + count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt); + if (count == -1) return NULL; + return strdup(buf); + } +} + +/* This is just a wrapper the user may want to call in order to make sure + * the linenoise returned buffer is freed with the same allocator it was + * created with. Useful when the main program is using an alternative + * allocator. */ +void linenoiseFree(void *ptr) { + free(ptr); +} + +/* ================================ History ================================= */ + +/* Free the history, but does not reset it. Only used when we have to + * exit() to avoid memory leaks are reported by valgrind & co. 
*/ +static void freeHistory(void) { + if (history) { + int j; + + for (j = 0; j < history_len; j++) + free(history[j]); + free(history); + } +} + +/* At exit we'll try to fix the terminal to the initial conditions. */ +static void linenoiseAtExit(void) { + disableRawMode(STDIN_FILENO); + freeHistory(); +} + +/* This is the API call to add a new entry in the linenoise history. + * It uses a fixed array of char pointers that are shifted (memmoved) + * when the history max length is reached in order to remove the older + * entry and make room for the new one, so it is not exactly suitable for huge + * histories, but will work well for a few hundred of entries. + * + * Using a circular buffer is smarter, but a bit more complex to handle. */ +int linenoiseHistoryAdd(const char *line) { + char *linecopy; + + if (history_max_len == 0) return 0; + + /* Initialization on first call. */ + if (history == NULL) { + history = malloc(sizeof(char*)*history_max_len); + if (history == NULL) return 0; + memset(history,0,(sizeof(char*)*history_max_len)); + } + + /* Don't add duplicated lines. */ + if (history_len && !strcmp(history[history_len-1], line)) return 0; + + /* Add an heap allocated copy of the line in the history. + * If we reached the max length, remove the older line. */ + linecopy = strdup(line); + if (!linecopy) return 0; + if (history_len == history_max_len) { + free(history[0]); + memmove(history,history+1,sizeof(char*)*(history_max_len-1)); + history_len--; + } + history[history_len] = linecopy; + history_len++; + return 1; +} + +/* Set the maximum length for the history. This function can be called even + * if there is already some history, the function will make sure to retain + * just the latest 'len' elements if the new history length value is smaller + * than the amount of items already inside the history. */ +int linenoiseHistorySetMaxLen(int len) { + char **new; + + if (len < 1) return 0; + if (history) { + int tocopy = history_len; + + new = malloc(sizeof(char*)*len); + if (new == NULL) return 0; + + /* If we can't copy everything, free the elements we'll not use. */ + if (len < tocopy) { + int j; + + for (j = 0; j < tocopy-len; j++) free(history[j]); + tocopy = len; + } + memset(new,0,sizeof(char*)*len); + memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy); + free(history); + history = new; + } + history_max_len = len; + if (history_len > history_max_len) + history_len = history_max_len; + return 1; +} + +/* Save the history in the specified file. On success 0 is returned + * otherwise -1 is returned. */ +int linenoiseHistorySave(const char *filename) { + mode_t old_umask = umask(S_IXUSR|S_IRWXG|S_IRWXO); + FILE *fp; + int j; + + fp = fopen(filename,"w"); + umask(old_umask); + if (fp == NULL) return -1; + fchmod(fileno(fp),S_IRUSR|S_IWUSR); + for (j = 0; j < history_len; j++) + fprintf(fp,"%s\n",history[j]); + fclose(fp); + return 0; +} + +/* Load the history from the specified file. If the file does not exist + * zero is returned and no operation is performed. + * + * If the file exists and the operation succeeded 0 is returned, otherwise + * on error -1 is returned. 
*/ +int linenoiseHistoryLoad(const char *filename) { + FILE *fp = fopen(filename,"r"); + char buf[LINENOISE_MAX_LINE]; + + if (fp == NULL) return -1; + + while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) { + char *p; + + p = strchr(buf,'\r'); + if (!p) p = strchr(buf,'\n'); + if (p) *p = '\0'; + linenoiseHistoryAdd(buf); + } + fclose(fp); + return 0; +} diff --git a/deps/linenoise/linenoise.h b/deps/linenoise/linenoise.h new file mode 100644 index 0000000..6dfee73 --- /dev/null +++ b/deps/linenoise/linenoise.h @@ -0,0 +1,75 @@ +/* linenoise.h -- VERSION 1.0 + * + * Guerrilla line editing library against the idea that a line editing lib + * needs to be 20,000 lines of C code. + * + * See linenoise.c for more information. + * + * ------------------------------------------------------------------------ + * + * Copyright (c) 2010-2014, Salvatore Sanfilippo + * Copyright (c) 2010-2013, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __LINENOISE_H +#define __LINENOISE_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct linenoiseCompletions { + size_t len; + char **cvec; +} linenoiseCompletions; + +typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *); +typedef char*(linenoiseHintsCallback)(const char *, int *color, int *bold); +typedef void(linenoiseFreeHintsCallback)(void *); +void linenoiseSetCompletionCallback(linenoiseCompletionCallback *); +void linenoiseSetHintsCallback(linenoiseHintsCallback *); +void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *); +void linenoiseAddCompletion(linenoiseCompletions *, const char *); + +char *linenoise(const char *prompt); +void linenoiseFree(void *ptr); +int linenoiseHistoryAdd(const char *line); +int linenoiseHistorySetMaxLen(int len); +int linenoiseHistorySave(const char *filename); +int linenoiseHistoryLoad(const char *filename); +void linenoiseClearScreen(void); +void linenoiseSetMultiLine(int ml); +void linenoisePrintKeyCodes(void); +void linenoiseMaskModeEnable(void); +void linenoiseMaskModeDisable(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __LINENOISE_H */ diff --git a/deps/lua/COPYRIGHT b/deps/lua/COPYRIGHT new file mode 100644 index 0000000..a860268 --- /dev/null +++ b/deps/lua/COPYRIGHT @@ -0,0 +1,34 @@ +Lua License +----------- + +Lua is licensed under the terms of the MIT license reproduced below. +This means that Lua is free software and can be used for both academic +and commercial purposes at absolutely no cost. + +For details and rationale, see http://www.lua.org/license.html . + +=============================================================================== + +Copyright (C) 1994-2012 Lua.org, PUC-Rio. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +=============================================================================== + +(end of COPYRIGHT) diff --git a/deps/lua/HISTORY b/deps/lua/HISTORY new file mode 100644 index 0000000..ce0c95b --- /dev/null +++ b/deps/lua/HISTORY @@ -0,0 +1,183 @@ +HISTORY for Lua 5.1 + +* Changes from version 5.0 to 5.1 + ------------------------------- + Language: + + new module system. + + new semantics for control variables of fors. + + new semantics for setn/getn. + + new syntax/semantics for varargs. + + new long strings and comments. + + new `mod' operator (`%') + + new length operator #t + + metatables for all types + API: + + new functions: lua_createtable, lua_get(set)field, lua_push(to)integer. + + user supplies memory allocator (lua_open becomes lua_newstate). 
+ + luaopen_* functions must be called through Lua. + Implementation: + + new configuration scheme via luaconf.h. + + incremental garbage collection. + + better handling of end-of-line in the lexer. + + fully reentrant parser (new Lua function `load') + + better support for 64-bit machines. + + native loadlib support for Mac OS X. + + standard distribution in only one library (lualib.a merged into lua.a) + +* Changes from version 4.0 to 5.0 + ------------------------------- + Language: + + lexical scoping. + + Lua coroutines. + + standard libraries now packaged in tables. + + tags replaced by metatables and tag methods replaced by metamethods, + stored in metatables. + + proper tail calls. + + each function can have its own global table, which can be shared. + + new __newindex metamethod, called when we insert a new key into a table. + + new block comments: --[[ ... ]]. + + new generic for. + + new weak tables. + + new boolean type. + + new syntax "local function". + + (f()) returns the first value returned by f. + + {f()} fills a table with all values returned by f. + + \n ignored in [[\n . + + fixed and-or priorities. + + more general syntax for function definition (e.g. function a.x.y:f()...end). + + more general syntax for function calls (e.g. (print or write)(9)). + + new functions (time/date, tmpfile, unpack, require, load*, etc.). + API: + + chunks are loaded by using lua_load; new luaL_loadfile and luaL_loadbuffer. + + introduced lightweight userdata, a simple "void*" without a metatable. + + new error handling protocol: the core no longer prints error messages; + all errors are reported to the caller on the stack. + + new lua_atpanic for host cleanup. + + new, signal-safe, hook scheme. + Implementation: + + new license: MIT. + + new, faster, register-based virtual machine. + + support for external multithreading and coroutines. + + new and consistent error message format. + + the core no longer needs "stdio.h" for anything (except for a single + use of sprintf to convert numbers to strings). + + lua.c now runs the environment variable LUA_INIT, if present. It can + be "@filename", to run a file, or the chunk itself. + + support for user extensions in lua.c. + sample implementation given for command line editing. + + new dynamic loading library, active by default on several platforms. + + safe garbage-collector metamethods. + + precompiled bytecodes checked for integrity (secure binary dostring). + + strings are fully aligned. + + position capture in string.find. + + read('*l') can read lines with embedded zeros. + +* Changes from version 3.2 to 4.0 + ------------------------------- + Language: + + new "break" and "for" statements (both numerical and for tables). + + uniform treatment of globals: globals are now stored in a Lua table. + + improved error messages. + + no more '$debug': full speed *and* full debug information. + + new read form: read(N) for next N bytes. + + general read patterns now deprecated. + (still available with -DCOMPAT_READPATTERNS.) + + all return values are passed as arguments for the last function + (old semantics still available with -DLUA_COMPAT_ARGRET) + + garbage collection tag methods for tables now deprecated. + + there is now only one tag method for order. + API: + + New API: fully re-entrant, simpler, and more efficient. + + New debug API. + Implementation: + + faster than ever: cleaner virtual machine and new hashing algorithm. + + non-recursive garbage-collector algorithm. + + reduced memory usage for programs with many strings. 
+ + improved treatment for memory allocation errors. + + improved support for 16-bit machines (we hope). + + code now compiles unmodified as both ANSI C and C++. + + numbers in bases other than 10 are converted using strtoul. + + new -f option in Lua to support #! scripts. + + luac can now combine text and binaries. + +* Changes from version 3.1 to 3.2 + ------------------------------- + + redirected all output in Lua's core to _ERRORMESSAGE and _ALERT. + + increased limit on the number of constants and globals per function + (from 2^16 to 2^24). + + debugging info (lua_debug and hooks) moved into lua_state and new API + functions provided to get and set this info. + + new debug lib gives full debugging access within Lua. + + new table functions "foreachi", "sort", "tinsert", "tremove", "getn". + + new io functions "flush", "seek". + +* Changes from version 3.0 to 3.1 + ------------------------------- + + NEW FEATURE: anonymous functions with closures (via "upvalues"). + + new syntax: + - local variables in chunks. + - better scope control with DO block END. + - constructors can now be also written: { record-part; list-part }. + - more general syntax for function calls and lvalues, e.g.: + f(x).y=1 + o:f(x,y):g(z) + f"string" is sugar for f("string") + + strings may now contain arbitrary binary data (e.g., embedded zeros). + + major code re-organization and clean-up; reduced module interdependecies. + + no arbitrary limits on the total number of constants and globals. + + support for multiple global contexts. + + better syntax error messages. + + new traversal functions "foreach" and "foreachvar". + + the default for numbers is now double. + changing it to use floats or longs is easy. + + complete debug information stored in pre-compiled chunks. + + sample interpreter now prompts user when run interactively, and also + handles control-C interruptions gracefully. + +* Changes from version 2.5 to 3.0 + ------------------------------- + + NEW CONCEPT: "tag methods". + Tag methods replace fallbacks as the meta-mechanism for extending the + semantics of Lua. Whereas fallbacks had a global nature, tag methods + work on objects having the same tag (e.g., groups of tables). + Existing code that uses fallbacks should work without change. + + new, general syntax for constructors {[exp] = exp, ... }. + + support for handling variable number of arguments in functions (varargs). + + support for conditional compilation ($if ... $else ... $end). + + cleaner semantics in API simplifies host code. + + better support for writing libraries (auxlib.h). + + better type checking and error messages in the standard library. + + luac can now also undump. 
+ +* Changes from version 2.4 to 2.5 + ------------------------------- + + io and string libraries are now based on pattern matching; + the old libraries are still available for compatibility + + dofile and dostring can now return values (via return statement) + + better support for 16- and 64-bit machines + + expanded documentation, with more examples + +* Changes from version 2.2 to 2.4 + ------------------------------- + + external compiler creates portable binary files that can be loaded faster + + interface for debugging and profiling + + new "getglobal" fallback + + new functions for handling references to Lua objects + + new functions in standard lib + + only one copy of each string is stored + + expanded documentation, with more examples + +* Changes from version 2.1 to 2.2 + ------------------------------- + + functions now may be declared with any "lvalue" as a name + + garbage collection of functions + + support for pipes + +* Changes from version 1.1 to 2.1 + ------------------------------- + + object-oriented support + + fallbacks + + simplified syntax for tables + + many internal improvements + +(end of HISTORY) diff --git a/deps/lua/INSTALL b/deps/lua/INSTALL new file mode 100644 index 0000000..17eb8ae --- /dev/null +++ b/deps/lua/INSTALL @@ -0,0 +1,99 @@ +INSTALL for Lua 5.1 + +* Building Lua + ------------ + Lua is built in the src directory, but the build process can be + controlled from the top-level Makefile. + + Building Lua on Unix systems should be very easy. First do "make" and + see if your platform is listed. If so, just do "make xxx", where xxx + is your platform name. The platforms currently supported are: + aix ansi bsd freebsd generic linux macosx mingw posix solaris + + If your platform is not listed, try the closest one or posix, generic, + ansi, in this order. + + See below for customization instructions and for instructions on how + to build with other Windows compilers. + + If you want to check that Lua has been built correctly, do "make test" + after building Lua. Also, have a look at the example programs in test. + +* Installing Lua + -------------- + Once you have built Lua, you may want to install it in an official + place in your system. In this case, do "make install". The official + place and the way to install files are defined in Makefile. You must + have the right permissions to install files. + + If you want to build and install Lua in one step, do "make xxx install", + where xxx is your platform name. + + If you want to install Lua locally, then do "make local". This will + create directories bin, include, lib, man, and install Lua there as + follows: + + bin: lua luac + include: lua.h luaconf.h lualib.h lauxlib.h lua.hpp + lib: liblua.a + man/man1: lua.1 luac.1 + + These are the only directories you need for development. + + There are man pages for lua and luac, in both nroff and html, and a + reference manual in html in doc, some sample code in test, and some + useful stuff in etc. You don't need these directories for development. + + If you want to install Lua locally, but in some other directory, do + "make install INSTALL_TOP=xxx", where xxx is your chosen directory. + + See below for instructions for Windows and other systems. + +* Customization + ------------- + Three things can be customized by editing a file: + - Where and how to install Lua -- edit Makefile. + - How to build Lua -- edit src/Makefile. + - Lua features -- edit src/luaconf.h. 
+ + You don't actually need to edit the Makefiles because you may set the + relevant variables when invoking make. + + On the other hand, if you need to select some Lua features, you'll need + to edit src/luaconf.h. The edited file will be the one installed, and + it will be used by any Lua clients that you build, to ensure consistency. + + We strongly recommend that you enable dynamic loading. This is done + automatically for all platforms listed above that have this feature + (and also Windows). See src/luaconf.h and also src/Makefile. + +* Building Lua on Windows and other systems + ----------------------------------------- + If you're not using the usual Unix tools, then the instructions for + building Lua depend on the compiler you use. You'll need to create + projects (or whatever your compiler uses) for building the library, + the interpreter, and the compiler, as follows: + + library: lapi.c lcode.c ldebug.c ldo.c ldump.c lfunc.c lgc.c llex.c + lmem.c lobject.c lopcodes.c lparser.c lstate.c lstring.c + ltable.c ltm.c lundump.c lvm.c lzio.c + lauxlib.c lbaselib.c ldblib.c liolib.c lmathlib.c loslib.c + ltablib.c lstrlib.c loadlib.c linit.c + + interpreter: library, lua.c + + compiler: library, luac.c print.c + + If you use Visual Studio .NET, you can use etc/luavs.bat in its + "Command Prompt". + + If all you want is to build the Lua interpreter, you may put all .c files + in a single project, except for luac.c and print.c. Or just use etc/all.c. + + To use Lua as a library in your own programs, you'll need to know how to + create and use libraries with your compiler. + + As mentioned above, you may edit luaconf.h to select some features before + building Lua. + +(end of INSTALL) diff --git a/deps/lua/Makefile b/deps/lua/Makefile new file mode 100644 index 0000000..209a132 --- /dev/null +++ b/deps/lua/Makefile @@ -0,0 +1,128 @@ +# makefile for installing Lua +# see INSTALL for installation instructions +# see src/Makefile and src/luaconf.h for further customization + +# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT ======================= + +# Your platform. See PLATS for possible values. +PLAT= none + +# Where to install. The installation starts in the src and doc directories, +# so take care if INSTALL_TOP is not an absolute path. +INSTALL_TOP= /usr/local +INSTALL_BIN= $(INSTALL_TOP)/bin +INSTALL_INC= $(INSTALL_TOP)/include +INSTALL_LIB= $(INSTALL_TOP)/lib +INSTALL_MAN= $(INSTALL_TOP)/man/man1 +# +# You probably want to make INSTALL_LMOD and INSTALL_CMOD consistent with +# LUA_ROOT, LUA_LDIR, and LUA_CDIR in luaconf.h (and also with etc/lua.pc). +INSTALL_LMOD= $(INSTALL_TOP)/share/lua/$V +INSTALL_CMOD= $(INSTALL_TOP)/lib/lua/$V + +# How to install. If your install program does not support "-p", then you +# may have to run ranlib on the installed liblua.a (do "make ranlib"). +INSTALL= install -p +INSTALL_EXEC= $(INSTALL) -m 0755 +INSTALL_DATA= $(INSTALL) -m 0644 +# +# If you don't have install you can use cp instead. +# INSTALL= cp -p +# INSTALL_EXEC= $(INSTALL) +# INSTALL_DATA= $(INSTALL) + +# Utilities. +MKDIR= mkdir -p +RANLIB= ranlib + +# == END OF USER SETTINGS. NO NEED TO CHANGE ANYTHING BELOW THIS LINE ========= + +# Convenience platforms targets. +PLATS= aix ansi bsd freebsd generic linux macosx mingw posix solaris + +# What to install. +TO_BIN= lua luac +TO_INC= lua.h luaconf.h lualib.h lauxlib.h ../etc/lua.hpp +TO_LIB= liblua.a +TO_MAN= lua.1 luac.1 + +# Lua version and release. 
+V= 5.1 +R= 5.1.5 + +all: $(PLAT) + +$(PLATS) clean: + cd src && $(MAKE) $@ + +test: dummy + src/lua test/hello.lua + +install: dummy + cd src && $(MKDIR) $(INSTALL_BIN) $(INSTALL_INC) $(INSTALL_LIB) $(INSTALL_MAN) $(INSTALL_LMOD) $(INSTALL_CMOD) + cd src && $(INSTALL_EXEC) $(TO_BIN) $(INSTALL_BIN) + cd src && $(INSTALL_DATA) $(TO_INC) $(INSTALL_INC) + cd src && $(INSTALL_DATA) $(TO_LIB) $(INSTALL_LIB) + cd doc && $(INSTALL_DATA) $(TO_MAN) $(INSTALL_MAN) + +ranlib: + cd src && cd $(INSTALL_LIB) && $(RANLIB) $(TO_LIB) + +local: + $(MAKE) install INSTALL_TOP=.. + +none: + @echo "Please do" + @echo " make PLATFORM" + @echo "where PLATFORM is one of these:" + @echo " $(PLATS)" + @echo "See INSTALL for complete instructions." + +# make may get confused with test/ and INSTALL in a case-insensitive OS +dummy: + +# echo config parameters +echo: + @echo "" + @echo "These are the parameters currently set in src/Makefile to build Lua $R:" + @echo "" + @cd src && $(MAKE) -s echo + @echo "" + @echo "These are the parameters currently set in Makefile to install Lua $R:" + @echo "" + @echo "PLAT = $(PLAT)" + @echo "INSTALL_TOP = $(INSTALL_TOP)" + @echo "INSTALL_BIN = $(INSTALL_BIN)" + @echo "INSTALL_INC = $(INSTALL_INC)" + @echo "INSTALL_LIB = $(INSTALL_LIB)" + @echo "INSTALL_MAN = $(INSTALL_MAN)" + @echo "INSTALL_LMOD = $(INSTALL_LMOD)" + @echo "INSTALL_CMOD = $(INSTALL_CMOD)" + @echo "INSTALL_EXEC = $(INSTALL_EXEC)" + @echo "INSTALL_DATA = $(INSTALL_DATA)" + @echo "" + @echo "See also src/luaconf.h ." + @echo "" + +# echo private config parameters +pecho: + @echo "V = $(V)" + @echo "R = $(R)" + @echo "TO_BIN = $(TO_BIN)" + @echo "TO_INC = $(TO_INC)" + @echo "TO_LIB = $(TO_LIB)" + @echo "TO_MAN = $(TO_MAN)" + +# echo config parameters as Lua code +# uncomment the last sed expression if you want nil instead of empty strings +lecho: + @echo "-- installation parameters for Lua $R" + @echo "VERSION = '$V'" + @echo "RELEASE = '$R'" + @$(MAKE) echo | grep = | sed -e 's/= /= "/' -e 's/$$/"/' #-e 's/""/nil/' + @echo "-- EOF" + +# list targets that do not create files (but not all makes understand .PHONY) +.PHONY: all $(PLATS) clean test install local none dummy echo pecho lecho + +# (end of Makefile) diff --git a/deps/lua/README b/deps/lua/README new file mode 100644 index 0000000..11b4dff --- /dev/null +++ b/deps/lua/README @@ -0,0 +1,37 @@ +README for Lua 5.1 + +See INSTALL for installation instructions. +See HISTORY for a summary of changes since the last released version. + +* What is Lua? + ------------ + Lua is a powerful, light-weight programming language designed for extending + applications. Lua is also frequently used as a general-purpose, stand-alone + language. Lua is free software. + + For complete information, visit Lua's web site at http://www.lua.org/ . + For an executive summary, see http://www.lua.org/about.html . + + Lua has been used in many different projects around the world. + For a short list, see http://www.lua.org/uses.html . + +* Availability + ------------ + Lua is freely available for both academic and commercial purposes. + See COPYRIGHT and http://www.lua.org/license.html for details. + Lua can be downloaded at http://www.lua.org/download.html . + +* Installation + ------------ + Lua is implemented in pure ANSI C, and compiles unmodified in all known + platforms that have an ANSI C compiler. In most Unix-like platforms, simply + do "make" with a suitable target. See INSTALL for detailed instructions. 
+ +* Origin + ------ + Lua is developed at Lua.org, a laboratory of the Department of Computer + Science of PUC-Rio (the Pontifical Catholic University of Rio de Janeiro + in Brazil). + For more information about the authors, see http://www.lua.org/authors.html . + +(end of README) diff --git a/deps/lua/doc/contents.html b/deps/lua/doc/contents.html new file mode 100644 index 0000000..3d83da9 --- /dev/null +++ b/deps/lua/doc/contents.html @@ -0,0 +1,497 @@ + + + +Lua 5.1 Reference Manual - contents + + + + + + + +
+

+ +Lua 5.1 Reference Manual +

+ +

+The reference manual is the official definition of the Lua language. +For a complete introduction to Lua programming, see the book +Programming in Lua. + +

+This manual is also available as a book: +

+ + + +Lua 5.1 Reference Manual +
by R. Ierusalimschy, L. H. de Figueiredo, W. Celes +
Lua.org, August 2006 +
ISBN 85-903798-3-3 +
+
+ +

+Buy a copy +of this book and +help to support +the Lua project. + +

+start +· +contents +· +index +· +other versions +


+ +Copyright © 2006–2012 Lua.org, PUC-Rio. +Freely available under the terms of the +Lua license. + + +

Contents

+ + +

Index

+ + + + + + + +
+

Lua functions

+_G
+_VERSION
+

+ +assert
+collectgarbage
+dofile
+error
+getfenv
+getmetatable
+ipairs
+load
+loadfile
+loadstring
+module
+next
+pairs
+pcall
+print
+rawequal
+rawget
+rawset
+require
+select
+setfenv
+setmetatable
+tonumber
+tostring
+type
+unpack
+xpcall
+

+ +coroutine.create
+coroutine.resume
+coroutine.running
+coroutine.status
+coroutine.wrap
+coroutine.yield
+

+ +debug.debug
+debug.getfenv
+debug.gethook
+debug.getinfo
+debug.getlocal
+debug.getmetatable
+debug.getregistry
+debug.getupvalue
+debug.setfenv
+debug.sethook
+debug.setlocal
+debug.setmetatable
+debug.setupvalue
+debug.traceback
+ +

+

 

+file:close
+file:flush
+file:lines
+file:read
+file:seek
+file:setvbuf
+file:write
+

+ +io.close
+io.flush
+io.input
+io.lines
+io.open
+io.output
+io.popen
+io.read
+io.stderr
+io.stdin
+io.stdout
+io.tmpfile
+io.type
+io.write
+

+ +math.abs
+math.acos
+math.asin
+math.atan
+math.atan2
+math.ceil
+math.cos
+math.cosh
+math.deg
+math.exp
+math.floor
+math.fmod
+math.frexp
+math.huge
+math.ldexp
+math.log
+math.log10
+math.max
+math.min
+math.modf
+math.pi
+math.pow
+math.rad
+math.random
+math.randomseed
+math.sin
+math.sinh
+math.sqrt
+math.tan
+math.tanh
+

+ +os.clock
+os.date
+os.difftime
+os.execute
+os.exit
+os.getenv
+os.remove
+os.rename
+os.setlocale
+os.time
+os.tmpname
+

+ +package.cpath
+package.loaded
+package.loaders
+package.loadlib
+package.path
+package.preload
+package.seeall
+

+ +string.byte
+string.char
+string.dump
+string.find
+string.format
+string.gmatch
+string.gsub
+string.len
+string.lower
+string.match
+string.rep
+string.reverse
+string.sub
+string.upper
+

+ +table.concat
+table.insert
+table.maxn
+table.remove
+table.sort
+ +

+

C API

+lua_Alloc
+lua_CFunction
+lua_Debug
+lua_Hook
+lua_Integer
+lua_Number
+lua_Reader
+lua_State
+lua_Writer
+

+ +lua_atpanic
+lua_call
+lua_checkstack
+lua_close
+lua_concat
+lua_cpcall
+lua_createtable
+lua_dump
+lua_equal
+lua_error
+lua_gc
+lua_getallocf
+lua_getfenv
+lua_getfield
+lua_getglobal
+lua_gethook
+lua_gethookcount
+lua_gethookmask
+lua_getinfo
+lua_getlocal
+lua_getmetatable
+lua_getstack
+lua_gettable
+lua_gettop
+lua_getupvalue
+lua_insert
+lua_isboolean
+lua_iscfunction
+lua_isfunction
+lua_islightuserdata
+lua_isnil
+lua_isnone
+lua_isnoneornil
+lua_isnumber
+lua_isstring
+lua_istable
+lua_isthread
+lua_isuserdata
+lua_lessthan
+lua_load
+lua_newstate
+lua_newtable
+lua_newthread
+lua_newuserdata
+lua_next
+lua_objlen
+lua_pcall
+lua_pop
+lua_pushboolean
+lua_pushcclosure
+lua_pushcfunction
+lua_pushfstring
+lua_pushinteger
+lua_pushlightuserdata
+lua_pushliteral
+lua_pushlstring
+lua_pushnil
+lua_pushnumber
+lua_pushstring
+lua_pushthread
+lua_pushvalue
+lua_pushvfstring
+lua_rawequal
+lua_rawget
+lua_rawgeti
+lua_rawset
+lua_rawseti
+lua_register
+lua_remove
+lua_replace
+lua_resume
+lua_setallocf
+lua_setfenv
+lua_setfield
+lua_setglobal
+lua_sethook
+lua_setlocal
+lua_setmetatable
+lua_settable
+lua_settop
+lua_setupvalue
+lua_status
+lua_toboolean
+lua_tocfunction
+lua_tointeger
+lua_tolstring
+lua_tonumber
+lua_topointer
+lua_tostring
+lua_tothread
+lua_touserdata
+lua_type
+lua_typename
+lua_upvalueindex
+lua_xmove
+lua_yield
+ +

+

auxiliary library

+luaL_Buffer
+luaL_Reg
+

+ +luaL_addchar
+luaL_addlstring
+luaL_addsize
+luaL_addstring
+luaL_addvalue
+luaL_argcheck
+luaL_argerror
+luaL_buffinit
+luaL_callmeta
+luaL_checkany
+luaL_checkint
+luaL_checkinteger
+luaL_checklong
+luaL_checklstring
+luaL_checknumber
+luaL_checkoption
+luaL_checkstack
+luaL_checkstring
+luaL_checktype
+luaL_checkudata
+luaL_dofile
+luaL_dostring
+luaL_error
+luaL_getmetafield
+luaL_getmetatable
+luaL_gsub
+luaL_loadbuffer
+luaL_loadfile
+luaL_loadstring
+luaL_newmetatable
+luaL_newstate
+luaL_openlibs
+luaL_optint
+luaL_optinteger
+luaL_optlong
+luaL_optlstring
+luaL_optnumber
+luaL_optstring
+luaL_prepbuffer
+luaL_pushresult
+luaL_ref
+luaL_register
+luaL_typename
+luaL_typerror
+luaL_unref
+luaL_where
+ +

+

+ +


+ +Last update: +Mon Feb 13 18:53:32 BRST 2012 + + + + + diff --git a/deps/lua/doc/cover.png b/deps/lua/doc/cover.png new file mode 100644 index 0000000..2dbb198 Binary files /dev/null and b/deps/lua/doc/cover.png differ diff --git a/deps/lua/doc/logo.gif b/deps/lua/doc/logo.gif new file mode 100644 index 0000000..2f5e4ac Binary files /dev/null and b/deps/lua/doc/logo.gif differ diff --git a/deps/lua/doc/lua.1 b/deps/lua/doc/lua.1 new file mode 100644 index 0000000..24809cc --- /dev/null +++ b/deps/lua/doc/lua.1 @@ -0,0 +1,163 @@ +.\" $Id: lua.man,v 1.11 2006/01/06 16:03:34 lhf Exp $ +.TH LUA 1 "$Date: 2006/01/06 16:03:34 $" +.SH NAME +lua \- Lua interpreter +.SH SYNOPSIS +.B lua +[ +.I options +] +[ +.I script +[ +.I args +] +] +.SH DESCRIPTION +.B lua +is the stand-alone Lua interpreter. +It loads and executes Lua programs, +either in textual source form or +in precompiled binary form. +(Precompiled binaries are output by +.BR luac , +the Lua compiler.) +.B lua +can be used as a batch interpreter and also interactively. +.LP +The given +.I options +(see below) +are executed and then +the Lua program in file +.I script +is loaded and executed. +The given +.I args +are available to +.I script +as strings in a global table named +.BR arg . +If these arguments contain spaces or other characters special to the shell, +then they should be quoted +(but note that the quotes will be removed by the shell). +The arguments in +.B arg +start at 0, +which contains the string +.RI ' script '. +The index of the last argument is stored in +.BR arg.n . +The arguments given in the command line before +.IR script , +including the name of the interpreter, +are available in negative indices in +.BR arg . +.LP +At the very start, +before even handling the command line, +.B lua +executes the contents of the environment variable +.BR LUA_INIT , +if it is defined. +If the value of +.B LUA_INIT +is of the form +.RI '@ filename ', +then +.I filename +is executed. +Otherwise, the string is assumed to be a Lua statement and is executed. +.LP +Options start with +.B '\-' +and are described below. +You can use +.B "'\--'" +to signal the end of options. +.LP +If no arguments are given, +then +.B "\-v \-i" +is assumed when the standard input is a terminal; +otherwise, +.B "\-" +is assumed. +.LP +In interactive mode, +.B lua +prompts the user, +reads lines from the standard input, +and executes them as they are read. +If a line does not contain a complete statement, +then a secondary prompt is displayed and +lines are read until a complete statement is formed or +a syntax error is found. +So, one way to interrupt the reading of an incomplete statement is +to force a syntax error: +adding a +.B ';' +in the middle of a statement is a sure way of forcing a syntax error +(except inside multiline strings and comments; these must be closed explicitly). +If a line starts with +.BR '=' , +then +.B lua +displays the values of all the expressions in the remainder of the +line. The expressions must be separated by commas. +The primary prompt is the value of the global variable +.BR _PROMPT , +if this value is a string; +otherwise, the default prompt is used. +Similarly, the secondary prompt is the value of the global variable +.BR _PROMPT2 . +So, +to change the prompts, +set the corresponding variable to a string of your choice. +You can do that after calling the interpreter +or on the command line +(but in this case you have to be careful with quotes +if the prompt string contains a space; otherwise you may confuse the shell.) 
+The default prompts are "> " and ">> ". +.SH OPTIONS +.TP +.B \- +load and execute the standard input as a file, +that is, +not interactively, +even when the standard input is a terminal. +.TP +.BI \-e " stat" +execute statement +.IR stat . +You need to quote +.I stat +if it contains spaces, quotes, +or other characters special to the shell. +.TP +.B \-i +enter interactive mode after +.I script +is executed. +.TP +.BI \-l " name" +call +.BI require(' name ') +before executing +.IR script . +Typically used to load libraries. +.TP +.B \-v +show version information. +.SH "SEE ALSO" +.BR luac (1) +.br +http://www.lua.org/ +.SH DIAGNOSTICS +Error messages should be self explanatory. +.SH AUTHORS +R. Ierusalimschy, +L. H. de Figueiredo, +and +W. Celes +.\" EOF diff --git a/deps/lua/doc/lua.css b/deps/lua/doc/lua.css new file mode 100644 index 0000000..7fafbb1 --- /dev/null +++ b/deps/lua/doc/lua.css @@ -0,0 +1,83 @@ +body { + color: #000000 ; + background-color: #FFFFFF ; + font-family: Helvetica, Arial, sans-serif ; + text-align: justify ; + margin-right: 30px ; + margin-left: 30px ; +} + +h1, h2, h3, h4 { + font-family: Verdana, Geneva, sans-serif ; + font-weight: normal ; + font-style: italic ; +} + +h2 { + padding-top: 0.4em ; + padding-bottom: 0.4em ; + padding-left: 30px ; + padding-right: 30px ; + margin-left: -30px ; + background-color: #E0E0FF ; +} + +h3 { + padding-left: 0.5em ; + border-left: solid #E0E0FF 1em ; +} + +table h3 { + padding-left: 0px ; + border-left: none ; +} + +a:link { + color: #000080 ; + background-color: inherit ; + text-decoration: none ; +} + +a:visited { + background-color: inherit ; + text-decoration: none ; +} + +a:link:hover, a:visited:hover { + color: #000080 ; + background-color: #E0E0FF ; +} + +a:link:active, a:visited:active { + color: #FF0000 ; +} + +hr { + border: 0 ; + height: 1px ; + color: #a0a0a0 ; + background-color: #a0a0a0 ; +} + +:target { + background-color: #F8F8F8 ; + padding: 8px ; + border: solid #a0a0a0 2px ; +} + +.footer { + color: gray ; + font-size: small ; +} + +input[type=text] { + border: solid #a0a0a0 2px ; + border-radius: 2em ; + -moz-border-radius: 2em ; + background-image: url('images/search.png') ; + background-repeat: no-repeat; + background-position: 4px center ; + padding-left: 20px ; + height: 2em ; +} + diff --git a/deps/lua/doc/lua.html b/deps/lua/doc/lua.html new file mode 100644 index 0000000..1d435ab --- /dev/null +++ b/deps/lua/doc/lua.html @@ -0,0 +1,172 @@ + + + +LUA man page + + + + + +

NAME

+lua - Lua interpreter +

SYNOPSIS

+lua +[ +options +] +[ +script +[ +args +] +] +

DESCRIPTION

+lua +is the stand-alone Lua interpreter. +It loads and executes Lua programs, +either in textual source form or +in precompiled binary form. +(Precompiled binaries are output by +luac, +the Lua compiler.) +lua +can be used as a batch interpreter and also interactively. +

+The given +options +(see below) +are executed and then +the Lua program in file +script +is loaded and executed. +The given +args +are available to +script +as strings in a global table named +arg. +If these arguments contain spaces or other characters special to the shell, +then they should be quoted +(but note that the quotes will be removed by the shell). +The arguments in +arg +start at 0, +which contains the string +'script'. +The index of the last argument is stored in +arg.n. +The arguments given in the command line before +script, +including the name of the interpreter, +are available in negative indices in +arg. +

+At the very start, +before even handling the command line, +lua +executes the contents of the environment variable +LUA_INIT, +if it is defined. +If the value of +LUA_INIT +is of the form +'@filename', +then +filename +is executed. +Otherwise, the string is assumed to be a Lua statement and is executed. +

+Options start with +'-' +and are described below. +You can use +'--' +to signal the end of options. +

+If no arguments are given, +then +"-v -i" +is assumed when the standard input is a terminal; +otherwise, +"-" +is assumed. +

+In interactive mode, +lua +prompts the user, +reads lines from the standard input, +and executes them as they are read. +If a line does not contain a complete statement, +then a secondary prompt is displayed and +lines are read until a complete statement is formed or +a syntax error is found. +So, one way to interrupt the reading of an incomplete statement is +to force a syntax error: +adding a +';' +in the middle of a statement is a sure way of forcing a syntax error +(except inside multiline strings and comments; these must be closed explicitly). +If a line starts with +'=', +then +lua +displays the values of all the expressions in the remainder of the +line. The expressions must be separated by commas. +The primary prompt is the value of the global variable +_PROMPT, +if this value is a string; +otherwise, the default prompt is used. +Similarly, the secondary prompt is the value of the global variable +_PROMPT2. +So, +to change the prompts, +set the corresponding variable to a string of your choice. +You can do that after calling the interpreter +or on the command line +(but in this case you have to be careful with quotes +if the prompt string contains a space; otherwise you may confuse the shell.) +The default prompts are "> " and ">> ". +

OPTIONS

+

+- +load and execute the standard input as a file, +that is, +not interactively, +even when the standard input is a terminal. +

+-e stat +execute statement +stat. +You need to quote +stat +if it contains spaces, quotes, +or other characters special to the shell. +

+-i +enter interactive mode after +script +is executed. +

+-l name +call +require('name') +before executing +script. +Typically used to load libraries. +

+-v +show version information. +

SEE ALSO

+luac(1) +
+http://www.lua.org/ +

DIAGNOSTICS

+Error messages should be self explanatory. +

AUTHORS

+R. Ierusalimschy, +L. H. de Figueiredo, +and +W. Celes + + + diff --git a/deps/lua/doc/luac.1 b/deps/lua/doc/luac.1 new file mode 100644 index 0000000..d814678 --- /dev/null +++ b/deps/lua/doc/luac.1 @@ -0,0 +1,136 @@ +.\" $Id: luac.man,v 1.28 2006/01/06 16:03:34 lhf Exp $ +.TH LUAC 1 "$Date: 2006/01/06 16:03:34 $" +.SH NAME +luac \- Lua compiler +.SH SYNOPSIS +.B luac +[ +.I options +] [ +.I filenames +] +.SH DESCRIPTION +.B luac +is the Lua compiler. +It translates programs written in the Lua programming language +into binary files that can be later loaded and executed. +.LP +The main advantages of precompiling chunks are: +faster loading, +protecting source code from accidental user changes, +and +off-line syntax checking. +.LP +Pre-compiling does not imply faster execution +because in Lua chunks are always compiled into bytecodes before being executed. +.B luac +simply allows those bytecodes to be saved in a file for later execution. +.LP +Pre-compiled chunks are not necessarily smaller than the corresponding source. +The main goal in pre-compiling is faster loading. +.LP +The binary files created by +.B luac +are portable only among architectures with the same word size and byte order. +.LP +.B luac +produces a single output file containing the bytecodes +for all source files given. +By default, +the output file is named +.BR luac.out , +but you can change this with the +.B \-o +option. +.LP +In the command line, +you can mix +text files containing Lua source and +binary files containing precompiled chunks. +This is useful to combine several precompiled chunks, +even from different (but compatible) platforms, +into a single precompiled chunk. +.LP +You can use +.B "'\-'" +to indicate the standard input as a source file +and +.B "'\--'" +to signal the end of options +(that is, +all remaining arguments will be treated as files even if they start with +.BR "'\-'" ). +.LP +The internal format of the binary files produced by +.B luac +is likely to change when a new version of Lua is released. +So, +save the source files of all Lua programs that you precompile. +.LP +.SH OPTIONS +Options must be separate. +.TP +.B \-l +produce a listing of the compiled bytecode for Lua's virtual machine. +Listing bytecodes is useful to learn about Lua's virtual machine. +If no files are given, then +.B luac +loads +.B luac.out +and lists its contents. +.TP +.BI \-o " file" +output to +.IR file , +instead of the default +.BR luac.out . +(You can use +.B "'\-'" +for standard output, +but not on platforms that open standard output in text mode.) +The output file may be a source file because +all files are loaded before the output file is written. +Be careful not to overwrite precious files. +.TP +.B \-p +load files but do not generate any output file. +Used mainly for syntax checking and for testing precompiled chunks: +corrupted files will probably generate errors when loaded. +Lua always performs a thorough integrity test on precompiled chunks. +Bytecode that passes this test is completely safe, +in the sense that it will not break the interpreter. +However, +there is no guarantee that such code does anything sensible. +(None can be given, because the halting problem is unsolvable.) +If no files are given, then +.B luac +loads +.B luac.out +and tests its contents. +No messages are displayed if the file passes the integrity test. +.TP +.B \-s +strip debug information before writing the output file. 
+This saves some space in very large chunks, +but if errors occur when running a stripped chunk, +then the error messages may not contain the full information they usually do. +For instance, +line numbers and names of local variables are lost. +.TP +.B \-v +show version information. +.SH FILES +.TP 15 +.B luac.out +default output file +.SH "SEE ALSO" +.BR lua (1) +.br +http://www.lua.org/ +.SH DIAGNOSTICS +Error messages should be self explanatory. +.SH AUTHORS +L. H. de Figueiredo, +R. Ierusalimschy and +W. Celes +.\" EOF diff --git a/deps/lua/doc/luac.html b/deps/lua/doc/luac.html new file mode 100644 index 0000000..179ffe8 --- /dev/null +++ b/deps/lua/doc/luac.html @@ -0,0 +1,145 @@ + + + +LUAC man page + + + + + +

NAME

+luac - Lua compiler +

SYNOPSIS

+luac +[ +options +] [ +filenames +] +

DESCRIPTION

+luac +is the Lua compiler. +It translates programs written in the Lua programming language +into binary files that can be later loaded and executed. +

+The main advantages of precompiling chunks are: +faster loading, +protecting source code from accidental user changes, +and +off-line syntax checking. +

+Precompiling does not imply faster execution +because in Lua chunks are always compiled into bytecodes before being executed. +luac +simply allows those bytecodes to be saved in a file for later execution. +

+Precompiled chunks are not necessarily smaller than the corresponding source. +The main goal in precompiling is faster loading. +

+The binary files created by +luac +are portable only among architectures with the same word size and byte order. +

+luac +produces a single output file containing the bytecodes +for all source files given. +By default, +the output file is named +luac.out, +but you can change this with the +-o +option. +

+In the command line, +you can mix +text files containing Lua source and +binary files containing precompiled chunks. +This is useful because several precompiled chunks, +even from different (but compatible) platforms, +can be combined into a single precompiled chunk. +

+You can use +'-' +to indicate the standard input as a source file +and +'--' +to signal the end of options +(that is, +all remaining arguments will be treated as files even if they start with +'-'). +

+The internal format of the binary files produced by +luac +is likely to change when a new version of Lua is released. +So, +save the source files of all Lua programs that you precompile. +

+

OPTIONS

+Options must be separate. +

+-l +produce a listing of the compiled bytecode for Lua's virtual machine. +Listing bytecodes is useful to learn about Lua's virtual machine. +If no files are given, then +luac +loads +luac.out +and lists its contents. +

+-o file +output to +file, +instead of the default +luac.out. +(You can use +'-' +for standard output, +but not on platforms that open standard output in text mode.) +The output file may be a source file because +all files are loaded before the output file is written. +Be careful not to overwrite precious files. +

+-p +load files but do not generate any output file. +Used mainly for syntax checking and for testing precompiled chunks: +corrupted files will probably generate errors when loaded. +Lua always performs a thorough integrity test on precompiled chunks. +Bytecode that passes this test is completely safe, +in the sense that it will not break the interpreter. +However, +there is no guarantee that such code does anything sensible. +(None can be given, because the halting problem is unsolvable.) +If no files are given, then +luac +loads +luac.out +and tests its contents. +No messages are displayed if the file passes the integrity test. +

+-s +strip debug information before writing the output file. +This saves some space in very large chunks, +but if errors occur when running a stripped chunk, +then the error messages may not contain the full information they usually do. +For instance, +line numbers and names of local variables are lost. +

+-v +show version information. +

FILES

+

+luac.out +default output file +

SEE ALSO

+lua(1) +
+http://www.lua.org/ +

DIAGNOSTICS

+Error messages should be self explanatory. +

AUTHORS

+L. H. de Figueiredo, +R. Ierusalimschy and +W. Celes + + + diff --git a/deps/lua/doc/manual.css b/deps/lua/doc/manual.css new file mode 100644 index 0000000..b49b362 --- /dev/null +++ b/deps/lua/doc/manual.css @@ -0,0 +1,24 @@ +h3 code { + font-family: inherit ; + font-size: inherit ; +} + +pre, code { + font-size: 12pt ; +} + +span.apii { + float: right ; + font-family: inherit ; + font-style: normal ; + font-size: small ; + color: gray ; +} + +p+h1, ul+h1 { + padding-top: 0.4em ; + padding-bottom: 0.4em ; + padding-left: 30px ; + margin-left: -30px ; + background-color: #E0E0FF ; +} diff --git a/deps/lua/doc/manual.html b/deps/lua/doc/manual.html new file mode 100644 index 0000000..4e41683 --- /dev/null +++ b/deps/lua/doc/manual.html @@ -0,0 +1,8804 @@ + + + + +Lua 5.1 Reference Manual + + + + + + + +
+

+ +Lua 5.1 Reference Manual +

+ +by Roberto Ierusalimschy, Luiz Henrique de Figueiredo, Waldemar Celes +

+ +Copyright © 2006–2012 Lua.org, PUC-Rio. +Freely available under the terms of the +Lua license. + +


+

+ +contents +· +index +· +other versions + + +

+ + + + + + +

1 - Introduction

+ +

+Lua is an extension programming language designed to support +general procedural programming with data description +facilities. +It also offers good support for object-oriented programming, +functional programming, and data-driven programming. +Lua is intended to be used as a powerful, light-weight +scripting language for any program that needs one. +Lua is implemented as a library, written in clean C +(that is, in the common subset of ANSI C and C++). + + +

+Being an extension language, Lua has no notion of a "main" program: +it only works embedded in a host client, +called the embedding program or simply the host. +This host program can invoke functions to execute a piece of Lua code, +can write and read Lua variables, +and can register C functions to be called by Lua code. +Through the use of C functions, Lua can be augmented to cope with +a wide range of different domains, +thus creating customized programming languages sharing a syntactical framework. +The Lua distribution includes a sample host program called lua, +which uses the Lua library to offer a complete, stand-alone Lua interpreter. + + +
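As an illustrative sketch (not part of this manual or of the sources added above), a minimal host program built against the Lua 5.1 headers could register a C function, write a global variable, and execute a chunk of Lua code roughly like this:

     /* Hypothetical host program, for illustration only. */
     #include <stdio.h>
     #include <lua.h>
     #include <lauxlib.h>
     #include <lualib.h>

     /* A C function made callable from Lua code. */
     static int l_greet(lua_State *L) {
         const char *name = luaL_checkstring(L, 1);
         printf("hello, %s\n", name);
         return 0;                          /* number of results */
     }

     int main(void) {
         lua_State *L = luaL_newstate();    /* create a Lua state */
         luaL_openlibs(L);                  /* load the standard libraries */

         lua_register(L, "greet", l_greet); /* expose the C function */
         lua_pushnumber(L, 42);             /* write a Lua global ... */
         lua_setglobal(L, "answer");

         /* ... and run a piece of Lua code that uses both. */
         if (luaL_dostring(L, "greet('world'); print(answer)") != 0)
             fprintf(stderr, "%s\n", lua_tostring(L, -1));

         lua_close(L);
         return 0;
     }

Such a program would be linked against the liblua.a library whose build and installation are described in INSTALL above.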

+Lua is free software, +and is provided as usual with no guarantees, +as stated in its license. +The implementation described in this manual is available +at Lua's official web site, www.lua.org. + + +

+Like any other reference manual, +this document is dry in places. +For a discussion of the decisions behind the design of Lua, +see the technical papers available at Lua's web site. +For a detailed introduction to programming in Lua, +see Roberto's book, Programming in Lua (Second Edition). + + + +

2 - The Language

+ +

+This section describes the lexis, the syntax, and the semantics of Lua. +In other words, +this section describes +which tokens are valid, +how they can be combined, +and what their combinations mean. + + +

+The language constructs will be explained using the usual extended BNF notation, +in which +{a} means 0 or more a's, and +[a] means an optional a. +Non-terminals are shown like non-terminal, +keywords are shown like kword, +and other terminal symbols are shown like `=´. +The complete syntax of Lua can be found in §8 +at the end of this manual. + + + +

2.1 - Lexical Conventions

+ +

+Names +(also called identifiers) +in Lua can be any string of letters, +digits, and underscores, +not beginning with a digit. +This coincides with the definition of names in most languages. +(The definition of letter depends on the current locale: +any character considered alphabetic by the current locale +can be used in an identifier.) +Identifiers are used to name variables and table fields. + + +

+The following keywords are reserved +and cannot be used as names: + + +

+     and       break     do        else      elseif
+     end       false     for       function  if
+     in        local     nil       not       or
+     repeat    return    then      true      until     while
+
+ +

+Lua is a case-sensitive language: +and is a reserved word, but And and AND +are two different, valid names. +As a convention, names starting with an underscore followed by +uppercase letters (such as _VERSION) +are reserved for internal global variables used by Lua. + + +

+The following strings denote other tokens: + +

+     +     -     *     /     %     ^     #
+     ==    ~=    <=    >=    <     >     =
+     (     )     {     }     [     ]
+     ;     :     ,     .     ..    ...
+
+ +

+Literal strings +can be delimited by matching single or double quotes, +and can contain the following C-like escape sequences: +'\a' (bell), +'\b' (backspace), +'\f' (form feed), +'\n' (newline), +'\r' (carriage return), +'\t' (horizontal tab), +'\v' (vertical tab), +'\\' (backslash), +'\"' (quotation mark [double quote]), +and '\'' (apostrophe [single quote]). +Moreover, a backslash followed by a real newline +results in a newline in the string. +A character in a string can also be specified by its numerical value +using the escape sequence \ddd, +where ddd is a sequence of up to three decimal digits. +(Note that if a numerical escape is to be followed by a digit, +it must be expressed using exactly three digits.) +Strings in Lua can contain any 8-bit value, including embedded zeros, +which can be specified as '\0'. + + +

+Literal strings can also be defined using a long format +enclosed by long brackets. +We define an opening long bracket of level n as an opening +square bracket followed by n equal signs followed by another +opening square bracket. +So, an opening long bracket of level 0 is written as [[, +an opening long bracket of level 1 is written as [=[, +and so on. +A closing long bracket is defined similarly; +for instance, a closing long bracket of level 4 is written as ]====]. +A long string starts with an opening long bracket of any level and +ends at the first closing long bracket of the same level. +Literals in this bracketed form can run for several lines, +do not interpret any escape sequences, +and ignore long brackets of any other level. +They can contain anything except a closing bracket of the proper level. + + +

+For convenience, +when the opening long bracket is immediately followed by a newline, +the newline is not included in the string. +As an example, in a system using ASCII +(in which 'a' is coded as 97, +newline is coded as 10, and '1' is coded as 49), +the five literal strings below denote the same string: + +

+     a = 'alo\n123"'
+     a = "alo\n123\""
+     a = '\97lo\10\04923"'
+     a = [[alo
+     123"]]
+     a = [==[
+     alo
+     123"]==]
+
+ +

+A numerical constant can be written with an optional decimal part +and an optional decimal exponent. +Lua also accepts integer hexadecimal constants, +by prefixing them with 0x. +Examples of valid numerical constants are + +

+     3   3.0   3.1416   314.16e-2   0.31416E1   0xff   0x56
+
+ +

+A comment starts with a double hyphen (--) +anywhere outside a string. +If the text immediately after -- is not an opening long bracket, +the comment is a short comment, +which runs until the end of the line. +Otherwise, it is a long comment, +which runs until the corresponding closing long bracket. +Long comments are frequently used to disable code temporarily. + + + + + +
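
+For instance, the following illustrative lines show a short comment, a long comment, and a level-2 long comment (which can safely contain ]] inside it):

+     -- a short comment, running to the end of the line
+     --[[ a long comment:
+          everything up to the closing long bracket is ignored ]]
+     --[==[ a level-2 long comment; it may contain ]] without being closed ]==]
+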

2.2 - Values and Types

+ +

+Lua is a dynamically typed language. +This means that +variables do not have types; only values do. +There are no type definitions in the language. +All values carry their own type. + + +

+All values in Lua are first-class values. +This means that all values can be stored in variables, +passed as arguments to other functions, and returned as results. + + +

+There are eight basic types in Lua: +nil, boolean, number, +string, function, userdata, +thread, and table. +Nil is the type of the value nil, +whose main property is to be different from any other value; +it usually represents the absence of a useful value. +Boolean is the type of the values false and true. +Both nil and false make a condition false; +any other value makes it true. +Number represents real (double-precision floating-point) numbers. +(It is easy to build Lua interpreters that use other +internal representations for numbers, +such as single-precision float or long integers; +see file luaconf.h.) +String represents arrays of characters. + +Lua is 8-bit clean: +strings can contain any 8-bit character, +including embedded zeros ('\0') (see §2.1). + + +

+Lua can call (and manipulate) functions written in Lua and +functions written in C +(see §2.5.8). + + +

+The type userdata is provided to allow arbitrary C data to +be stored in Lua variables. +This type corresponds to a block of raw memory +and has no pre-defined operations in Lua, +except assignment and identity test. +However, by using metatables, +the programmer can define operations for userdata values +(see §2.8). +Userdata values cannot be created or modified in Lua, +only through the C API. +This guarantees the integrity of data owned by the host program. + + +

+The type thread represents independent threads of execution +and it is used to implement coroutines (see §2.11). +Do not confuse Lua threads with operating-system threads. +Lua supports coroutines on all systems, +even those that do not support threads. + + +

+The type table implements associative arrays, +that is, arrays that can be indexed not only with numbers, +but with any value (except nil). +Tables can be heterogeneous; +that is, they can contain values of all types (except nil). +Tables are the sole data structuring mechanism in Lua; +they can be used to represent ordinary arrays, +symbol tables, sets, records, graphs, trees, etc. +To represent records, Lua uses the field name as an index. +The language supports this representation by +providing a.name as syntactic sugar for a["name"]. +There are several convenient ways to create tables in Lua +(see §2.5.7). + + +

+Like indices, +the value of a table field can be of any type (except nil). +In particular, +because functions are first-class values, +table fields can contain functions. +Thus tables can also carry methods (see §2.5.9). + + +

+Tables, functions, threads, and (full) userdata values are objects: +variables do not actually contain these values, +only references to them. +Assignment, parameter passing, and function returns +always manipulate references to such values; +these operations do not imply any kind of copy. + + +

+The library function type returns a string describing the type +of a given value. + + + +

2.2.1 - Coercion

+ +

+Lua provides automatic conversion between +string and number values at run time. +Any arithmetic operation applied to a string tries to convert +this string to a number, following the usual conversion rules. +Conversely, whenever a number is used where a string is expected, +the number is converted to a string, in a reasonable format. +For complete control over how numbers are converted to strings, +use the format function from the string library +(see string.format). + + + + + + + +
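
+As an illustration, assuming the standard string library is loaded, these calls show the conversions at work:

+     print("10" + 1)                       --> 11
+     print("10" * "2")                     --> 20
+     print(10 .. 20)                       --> 1020
+     print(string.format("%.2f", 1/3))     --> 0.33
+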

2.3 - Variables

+ +

+Variables are places that store values. + +There are three kinds of variables in Lua: +global variables, local variables, and table fields. + + +

+A single name can denote a global variable or a local variable +(or a function's formal parameter, +which is a particular kind of local variable): + +

+	var ::= Name
+

+Name denotes identifiers, as defined in §2.1. + + +

+Any variable is assumed to be global unless explicitly declared +as a local (see §2.4.7). +Local variables are lexically scoped: +local variables can be freely accessed by functions +defined inside their scope (see §2.6). + + +

+Before the first assignment to a variable, its value is nil. + + +

+Square brackets are used to index a table: + +

+	var ::= prefixexp `[´ exp `]´
+

+The meaning of accesses to global variables +and table fields can be changed via metatables. +An access to an indexed variable t[i] is equivalent to +a call gettable_event(t,i). +(See §2.8 for a complete description of the +gettable_event function. +This function is not defined or callable in Lua. +We use it here only for explanatory purposes.) + + +

+The syntax var.Name is just syntactic sugar for +var["Name"]: + +

+	var ::= prefixexp `.´ Name
+
+ +

+All global variables live as fields in ordinary Lua tables, +called environment tables or simply +environments (see §2.9). +Each function has its own reference to an environment, +so that all global variables in this function +will refer to this environment table. +When a function is created, +it inherits the environment from the function that created it. +To get the environment table of a Lua function, +you call getfenv. +To replace it, +you call setfenv. +(You can only manipulate the environment of C functions +through the debug library; see §5.9.) + +

+An access to a global variable x +is equivalent to _env.x, +which in turn is equivalent to + +

+     gettable_event(_env, "x")
+

+where _env is the environment of the running function. +(See §2.8 for a complete description of the +gettable_event function. +This function is not defined or callable in Lua. +Similarly, the _env variable is not defined in Lua. +We use them here only for explanatory purposes.) + + + + + +

2.4 - Statements

+ +

+Lua supports an almost conventional set of statements, +similar to those in Pascal or C. +This set includes +assignments, control structures, function calls, +and variable declarations. + + + +

2.4.1 - Chunks

+ +

+The unit of execution of Lua is called a chunk. +A chunk is simply a sequence of statements, +which are executed sequentially. +Each statement can be optionally followed by a semicolon: + +

+	chunk ::= {stat [`;´]}
+

+There are no empty statements and thus ';;' is not legal. + + +

+Lua handles a chunk as the body of an anonymous function +with a variable number of arguments +(see §2.5.9). +As such, chunks can define local variables, +receive arguments, and return values. + + +
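
+One possible illustration: a chunk supplied as a string (loaded here with the library function loadstring) receives its arguments through '...' and returns a value:

+     local f = loadstring("local a, b = ...; return a + b")
+     print(f(2, 3))        --> 5
+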

+A chunk can be stored in a file or in a string inside the host program. +To execute a chunk, +Lua first pre-compiles the chunk into instructions for a virtual machine, +and then it executes the compiled code +with an interpreter for the virtual machine. + + +

+Chunks can also be pre-compiled into binary form; +see program luac for details. +Programs in source and compiled forms are interchangeable; +Lua automatically detects the file type and acts accordingly. + + + + + + +

2.4.2 - Blocks

+A block is a list of statements; +syntactically, a block is the same as a chunk: + +

+	block ::= chunk
+
+ +

+A block can be explicitly delimited to produce a single statement: + +

+	stat ::= do block end
+

+Explicit blocks are useful +to control the scope of variable declarations. +Explicit blocks are also sometimes used to +add a return or break statement in the middle +of another block (see §2.4.4). + + + + + +
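
+A small illustrative example (the variable names are arbitrary):

+     x, y = 1, 2
+     do
+       local tmp = x        -- 'tmp' is visible only until the matching 'end'
+       x = y
+       y = tmp
+     end
+     print(x, y)            --> 2    1
+     print(tmp)             --> nil  ('tmp' is out of scope here)
+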

2.4.3 - Assignment

+ +

+Lua allows multiple assignments. +Therefore, the syntax for assignment +defines a list of variables on the left side +and a list of expressions on the right side. +The elements in both lists are separated by commas: + +

+	stat ::= varlist `=´ explist
+	varlist ::= var {`,´ var}
+	explist ::= exp {`,´ exp}
+

+Expressions are discussed in §2.5. + + +

+Before the assignment, +the list of values is adjusted to the length of +the list of variables. +If there are more values than needed, +the excess values are thrown away. +If there are fewer values than needed, +the list is extended with as many nil's as needed. +If the list of expressions ends with a function call, +then all values returned by that call enter the list of values, +before the adjustment +(except when the call is enclosed in parentheses; see §2.5). + + +

+The assignment statement first evaluates all its expressions +and only then are the assignments performed. +Thus the code + +

+     i = 3
+     i, a[i] = i+1, 20
+

+sets a[3] to 20, without affecting a[4] +because the i in a[i] is evaluated (to 3) +before it is assigned 4. +Similarly, the line + +

+     x, y = y, x
+

+exchanges the values of x and y, +and + +

+     x, y, z = y, z, x
+

+cyclically permutes the values of x, y, and z. + + +

+The meaning of assignments to global variables +and table fields can be changed via metatables. +An assignment to an indexed variable t[i] = val is equivalent to +settable_event(t,i,val). +(See §2.8 for a complete description of the +settable_event function. +This function is not defined or callable in Lua. +We use it here only for explanatory purposes.) + + +

+An assignment to a global variable x = val +is equivalent to the assignment +_env.x = val, +which in turn is equivalent to + +

+     settable_event(_env, "x", val)
+

+where _env is the environment of the running function. +(The _env variable is not defined in Lua. +We use it here only for explanatory purposes.) + + + + + +

2.4.4 - Control Structures

+The control structures +if, while, and repeat have the usual meaning and +familiar syntax: + + + + +

+	stat ::= while exp do block end
+	stat ::= repeat block until exp
+	stat ::= if exp then block {elseif exp then block} [else block] end
+

+Lua also has a for statement, in two flavors (see §2.4.5). + + +

+The condition expression of a +control structure can return any value. +Both false and nil are considered false. +All values different from nil and false are considered true +(in particular, the number 0 and the empty string are also true). + + +

+In the repeat–until loop, +the inner block does not end at the until keyword, +but only after the condition. +So, the condition can refer to local variables +declared inside the loop block. + +
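
+For instance, a loop that reads lines with io.read until a non-empty one arrives:

+     repeat
+       local line = io.read()
+     until line ~= ""       -- 'line', declared inside the loop body, is visible here
+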

+The return statement is used to return values +from a function or a chunk (which is just a function). + +Functions and chunks can return more than one value, +and so the syntax for the return statement is + +

+	stat ::= return [explist]
+
+ +

+The break statement is used to terminate the execution of a +while, repeat, or for loop, +skipping to the next statement after the loop: + + +

+	stat ::= break
+

+A break ends the innermost enclosing loop. + + +

+The return and break +statements can only be written as the last statement of a block. +If it is really necessary to return or break in the +middle of a block, +then an explicit inner block can be used, +as in the idioms +do return end and do break end, +because now return and break are the last statements in +their (inner) blocks. + + + + + +
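
+An illustration of the idiom (the function name trace is arbitrary):

+     function trace (msg)
+       do return end        -- remove this line to re-enable the output below
+       print(msg)
+     end
+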

2.4.5 - For Statement

+ +

+ +The for statement has two forms: +one numeric and one generic. + + +

+The numeric for loop repeats a block of code while a +control variable runs through an arithmetic progression. +It has the following syntax: + +

+	stat ::= for Name `=´ exp `,´ exp [`,´ exp] do block end
+

+The block is repeated for name starting at the value of +the first exp, until it passes the second exp by steps of the +third exp. +More precisely, a for statement like + +

+     for v = e1, e2, e3 do block end
+

+is equivalent to the code: + +

+     do
+       local var, limit, step = tonumber(e1), tonumber(e2), tonumber(e3)
+       if not (var and limit and step) then error() end
+       while (step > 0 and var <= limit) or (step <= 0 and var >= limit) do
+         local v = var
+         block
+         var = var + step
+       end
+     end
+

+Note the following: + +

    + +
  • +All three control expressions are evaluated only once, +before the loop starts. +They must all result in numbers. +
  • + +
  • +var, limit, and step are invisible variables. +The names shown here are for explanatory purposes only. +
  • + +
  • +If the third expression (the step) is absent, +then a step of 1 is used. +
  • + +
  • +You can use break to exit a for loop. +
  • + +
  • +The loop variable v is local to the loop; +you cannot use its value after the for ends or is broken. +If you need this value, +assign it to another variable before breaking or exiting the loop. +
  • + +
+ +
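
+An illustrative numeric for loop, counting down by steps of -2:

+     for i = 10, 1, -2 do
+       print(i)             -- prints 10, 8, 6, 4, 2, one value per line
+     end
+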

+The generic for statement works over functions, +called iterators. +On each iteration, the iterator function is called to produce a new value, +stopping when this new value is nil. +The generic for loop has the following syntax: + +

+	stat ::= for namelist in explist do block end
+	namelist ::= Name {`,´ Name}
+

+A for statement like + +

+     for var_1, ···, var_n in explist do block end
+

+is equivalent to the code: + +

+     do
+       local f, s, var = explist
+       while true do
+         local var_1, ···, var_n = f(s, var)
+         var = var_1
+         if var == nil then break end
+         block
+       end
+     end
+

+Note the following: + +

    + +
  • +explist is evaluated only once. +Its results are an iterator function, +a state, +and an initial value for the first iterator variable. +
  • + +
  • +f, s, and var are invisible variables. +The names are here for explanatory purposes only. +
  • + +
  • +You can use break to exit a for loop. +
  • + +
  • +The loop variables var_i are local to the loop; +you cannot use their values after the for ends. +If you need these values, +then assign them to other variables before breaking or exiting the loop. +
  • + +
+ + + + +
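
+For illustration, two loops using the standard iterator functions ipairs and pairs:

+     local t = {"a", "b", "c"}
+     for i, v in ipairs(t) do
+       print(i, v)          -- prints 1 a, then 2 b, then 3 c
+     end
+     for k, v in pairs{x = 10, y = 20} do
+       print(k, v)          -- the traversal order of pairs is not specified
+     end
+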

2.4.6 - Function Calls as Statements

+To allow possible side-effects, +function calls can be executed as statements: + +

+	stat ::= functioncall
+

+In this case, all returned values are thrown away. +Function calls are explained in §2.5.8. + + + + + +

2.4.7 - Local Declarations

+Local variables can be declared anywhere inside a block. +The declaration can include an initial assignment: + +

+	stat ::= local namelist [`=´ explist]
+

+If present, an initial assignment has the same semantics +of a multiple assignment (see §2.4.3). +Otherwise, all variables are initialized with nil. + + +

+A chunk is also a block (see §2.4.1), +and so local variables can be declared in a chunk outside any explicit block. +The scope of such local variables extends until the end of the chunk. + + +

+The visibility rules for local variables are explained in §2.6. + + + + + + + +

2.5 - Expressions

+ +

+The basic expressions in Lua are the following: + +

+	exp ::= prefixexp
+	exp ::= nil | false | true
+	exp ::= Number
+	exp ::= String
+	exp ::= function
+	exp ::= tableconstructor
+	exp ::= `...´
+	exp ::= exp binop exp
+	exp ::= unop exp
+	prefixexp ::= var | functioncall | `(´ exp `)´
+
+ +

+Numbers and literal strings are explained in §2.1; +variables are explained in §2.3; +function definitions are explained in §2.5.9; +function calls are explained in §2.5.8; +table constructors are explained in §2.5.7. +Vararg expressions, +denoted by three dots ('...'), can only be used when +directly inside a vararg function; +they are explained in §2.5.9. + + +

+Binary operators comprise arithmetic operators (see §2.5.1), +relational operators (see §2.5.2), logical operators (see §2.5.3), +and the concatenation operator (see §2.5.4). +Unary operators comprise the unary minus (see §2.5.1), +the unary not (see §2.5.3), +and the unary length operator (see §2.5.5). + + +

+Both function calls and vararg expressions can result in multiple values. +If an expression is used as a statement +(only possible for function calls (see §2.4.6)), +then its return list is adjusted to zero elements, +thus discarding all returned values. +If an expression is used as the last (or the only) element +of a list of expressions, +then no adjustment is made +(unless the call is enclosed in parentheses). +In all other contexts, +Lua adjusts the result list to one element, +discarding all values except the first one. + + +

+Here are some examples: + +

+     f()                -- adjusted to 0 results
+     g(f(), x)          -- f() is adjusted to 1 result
+     g(x, f())          -- g gets x plus all results from f()
+     a,b,c = f(), x     -- f() is adjusted to 1 result (c gets nil)
+     a,b = ...          -- a gets the first vararg parameter, b gets
+                        -- the second (both a and b can get nil if there
+                        -- is no corresponding vararg parameter)
+     
+     a,b,c = x, f()     -- f() is adjusted to 2 results
+     a,b,c = f()        -- f() is adjusted to 3 results
+     return f()         -- returns all results from f()
+     return ...         -- returns all received vararg parameters
+     return x,y,f()     -- returns x, y, and all results from f()
+     {f()}              -- creates a list with all results from f()
+     {...}              -- creates a list with all vararg parameters
+     {f(), nil}         -- f() is adjusted to 1 result
+
+ +

+Any expression enclosed in parentheses always results in only one value. +Thus, +(f(x,y,z)) is always a single value, +even if f returns several values. +(The value of (f(x,y,z)) is the first value returned by f +or nil if f does not return any values.) + + + +

2.5.1 - Arithmetic Operators

+Lua supports the usual arithmetic operators: +the binary + (addition), +- (subtraction), * (multiplication), +/ (division), % (modulo), and ^ (exponentiation); +and unary - (negation). +If the operands are numbers, or strings that can be converted to +numbers (see §2.2.1), +then all operations have the usual meaning. +Exponentiation works for any exponent. +For instance, x^(-0.5) computes the inverse of the square root of x. +Modulo is defined as + +

+     a % b == a - math.floor(a/b)*b
+

+That is, it is the remainder of a division that rounds +the quotient towards minus infinity. + + + + + +
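
+For instance, with illustrative values:

+     print(5 % 3)       --> 2
+     print(-5 % 3)      --> 1      (the result has the sign of the divisor)
+     print(5 % -3)      --> -1
+     print(5.5 % 2)     --> 1.5
+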

2.5.2 - Relational Operators

+The relational operators in Lua are + +

+     ==    ~=    <     >     <=    >=
+

+These operators always result in false or true. + + +

+Equality (==) first compares the type of its operands. +If the types are different, then the result is false. +Otherwise, the values of the operands are compared. +Numbers and strings are compared in the usual way. +Objects (tables, userdata, threads, and functions) +are compared by reference: +two objects are considered equal only if they are the same object. +Every time you create a new object +(a table, userdata, thread, or function), +this new object is different from any previously existing object. + + +

+You can change the way that Lua compares tables and userdata +by using the "eq" metamethod (see §2.8). + + +

+The conversion rules of §2.2.1 +do not apply to equality comparisons. +Thus, "0"==0 evaluates to false, +and t[0] and t["0"] denote different +entries in a table. + + +

+The operator ~= is exactly the negation of equality (==). + + +

+The order operators work as follows. +If both arguments are numbers, then they are compared as such. +Otherwise, if both arguments are strings, +then their values are compared according to the current locale. +Otherwise, Lua tries to call the "lt" or the "le" +metamethod (see §2.8). +A comparison a > b is translated to b < a +and a >= b is translated to b <= a. + + + + + +
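
+Some illustrative comparisons:

+     print(2 < 15)          --> true
+     print("2" < "15")      --> false    (strings compare in lexicographic/locale order)
+     print("0" == 0)        --> false    (different types are never equal)
+     print(1 == 1.0)        --> true     (both operands are numbers with the same value)
+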

2.5.3 - Logical Operators

+The logical operators in Lua are +and, or, and not. +Like the control structures (see §2.4.4), +all logical operators consider both false and nil as false +and anything else as true. + + +

+The negation operator not always returns false or true. +The conjunction operator and returns its first argument +if this value is false or nil; +otherwise, and returns its second argument. +The disjunction operator or returns its first argument +if this value is different from nil and false; +otherwise, or returns its second argument. +Both and and or use short-cut evaluation; +that is, +the second operand is evaluated only if necessary. +Here are some examples: + +

+     10 or 20            --> 10
+     10 or error()       --> 10
+     nil or "a"          --> "a"
+     nil and 10          --> nil
+     false and error()   --> false
+     false and nil       --> false
+     false or nil        --> nil
+     10 and 20           --> 20
+

+(In this manual, +--> indicates the result of the preceding expression.) + + + + + +

2.5.4 - Concatenation

+The string concatenation operator in Lua is +denoted by two dots ('..'). +If both operands are strings or numbers, then they are converted to +strings according to the rules mentioned in §2.2.1. +Otherwise, the "concat" metamethod is called (see §2.8). + + + + + +
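
+For instance, both operands below are converted as described:

+     print("Lua " .. 5.1)       --> Lua 5.1    (the number is converted to a string)
+     print(1 .. 2 .. 3)         --> 123
+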

2.5.5 - The Length Operator

+ +

+The length operator is denoted by the unary operator #. +The length of a string is its number of bytes +(that is, the usual meaning of string length when each +character is one byte). + + +

+The length of a table t is defined to be any +integer index n +such that t[n] is not nil and t[n+1] is nil; +moreover, if t[1] is nil, n can be zero. +For a regular array, with non-nil values from 1 to a given n, +its length is exactly that n, +the index of its last value. +If the array has "holes" +(that is, nil values between other non-nil values), +then #t can be any of the indices that +directly precedes a nil value +(that is, it may consider any such nil value as the end of +the array). + + + + + +
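
+Some illustrative uses of the length operator:

+     print(#"hello")            --> 5
+     print(#{10, 20, 30})       --> 3
+     print(#{10, nil, 30})      --> 1 or 3   (either is a valid border for an array with a hole)
+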

2.5.6 - Precedence

+Operator precedence in Lua follows the table below, +from lower to higher priority: + +

+     or
+     and
+     <     >     <=    >=    ~=    ==
+     ..
+     +     -
+     *     /     %
+     not   #     - (unary)
+     ^
+

+As usual, +you can use parentheses to change the precedences of an expression. +The concatenation ('..') and exponentiation ('^') +operators are right associative. +All other binary operators are left associative. + + + + + +
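
+Some consequences of these rules, shown with illustrative values:

+     print(2 + 3 * 4)       --> 14     (* has higher priority than +)
+     print(2 ^ 3 ^ 2)       --> 512    (^ is right associative: 2^(3^2))
+     print(-2 ^ 2)          --> -4     (^ has higher priority than unary -)
+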

2.5.7 - Table Constructors

+Table constructors are expressions that create tables. +Every time a constructor is evaluated, a new table is created. +A constructor can be used to create an empty table +or to create a table and initialize some of its fields. +The general syntax for constructors is + +

+	tableconstructor ::= `{´ [fieldlist] `}´
+	fieldlist ::= field {fieldsep field} [fieldsep]
+	field ::= `[´ exp `]´ `=´ exp | Name `=´ exp | exp
+	fieldsep ::= `,´ | `;´
+
+ +

+Each field of the form [exp1] = exp2 adds to the new table an entry +with key exp1 and value exp2. +A field of the form name = exp is equivalent to +["name"] = exp. +Finally, fields of the form exp are equivalent to +[i] = exp, where i are consecutive numerical integers, +starting with 1. +Fields in the other formats do not affect this counting. +For example, + +

+     a = { [f(1)] = g; "x", "y"; x = 1, f(x), [30] = 23; 45 }
+

+is equivalent to + +

+     do
+       local t = {}
+       t[f(1)] = g
+       t[1] = "x"         -- 1st exp
+       t[2] = "y"         -- 2nd exp
+       t.x = 1            -- t["x"] = 1
+       t[3] = f(x)        -- 3rd exp
+       t[30] = 23
+       t[4] = 45          -- 4th exp
+       a = t
+     end
+
+ +

+If the last field in the list has the form exp +and the expression is a function call or a vararg expression, +then all values returned by this expression enter the list consecutively +(see §2.5.8). +To avoid this, +enclose the function call or the vararg expression +in parentheses (see §2.5). + + +

+The field list can have an optional trailing separator, +as a convenience for machine-generated code. + + + + + +

2.5.8 - Function Calls

+A function call in Lua has the following syntax: + +

+	functioncall ::= prefixexp args
+

+In a function call, +first prefixexp and args are evaluated. +If the value of prefixexp has type function, +then this function is called +with the given arguments. +Otherwise, the prefixexp "call" metamethod is called, +having as first parameter the value of prefixexp, +followed by the original call arguments +(see §2.8). + + +

+The form + +

+	functioncall ::= prefixexp `:´ Name args
+

+can be used to call "methods". +A call v:name(args) +is syntactic sugar for v.name(v,args), +except that v is evaluated only once. + + +

+Arguments have the following syntax: + +

+	args ::= `(´ [explist] `)´
+	args ::= tableconstructor
+	args ::= String
+

+All argument expressions are evaluated before the call. +A call of the form f{fields} is +syntactic sugar for f({fields}); +that is, the argument list is a single new table. +A call of the form f'string' +(or f"string" or f[[string]]) +is syntactic sugar for f('string'); +that is, the argument list is a single literal string. + + +
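
+Two illustrative calls using this sugar (unpack is the standard function that returns the elements of a list):

+     print "hello"               -- same as print("hello")
+     print(unpack{10, 20, 30})   -- same as print(unpack({10, 20, 30})); prints 10, 20, and 30
+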

+As an exception to the free-format syntax of Lua, +you cannot put a line break before the '(' in a function call. +This restriction avoids some ambiguities in the language. +If you write + +

+     a = f
+     (g).x(a)
+

+Lua would see that as a single statement, a = f(g).x(a). +So, if you want two statements, you must add a semi-colon between them. +If you actually want to call f, +you must remove the line break before (g). + + +

+A call of the form return functioncall is called +a tail call. +Lua implements proper tail calls +(or proper tail recursion): +in a tail call, +the called function reuses the stack entry of the calling function. +Therefore, there is no limit on the number of nested tail calls that +a program can execute. +However, a tail call erases any debug information about the +calling function. +Note that a tail call only happens with a particular syntax, +where the return has one single function call as argument; +this syntax makes the calling function return exactly +the returns of the called function. +So, none of the following examples are tail calls: + +

+     return (f(x))        -- results adjusted to 1
+     return 2 * f(x)
+     return x, f(x)       -- additional results
+     f(x); return         -- results discarded
+     return x or f(x)     -- results adjusted to 1
+
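
+By contrast, the following sketch (the name countdown is arbitrary) is a proper tail call and therefore runs in constant stack space:

+     function countdown (n)
+       if n == 0 then return "done" end
+       return countdown(n - 1)     -- a tail call: the stack entry is reused
+     end
+     print(countdown(100000))      --> done
+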
+ + + + +

2.5.9 - Function Definitions

+ +

+The syntax for function definition is + +

+	function ::= function funcbody
+	funcbody ::= `(´ [parlist] `)´ block end
+
+ +

+The following syntactic sugar simplifies function definitions: + +

+	stat ::= function funcname funcbody
+	stat ::= local function Name funcbody
+	funcname ::= Name {`.´ Name} [`:´ Name]
+

+The statement + +

+     function f () body end
+

+translates to + +

+     f = function () body end
+

+The statement + +

+     function t.a.b.c.f () body end
+

+translates to + +

+     t.a.b.c.f = function () body end
+

+The statement + +

+     local function f () body end
+

+translates to + +

+     local f; f = function () body end
+

+not to + +

+     local f = function () body end
+

+(This only makes a difference when the body of the function +contains references to f.) + + +

+A function definition is an executable expression, +whose value has type function. +When Lua pre-compiles a chunk, +all its function bodies are pre-compiled too. +Then, whenever Lua executes the function definition, +the function is instantiated (or closed). +This function instance (or closure) +is the final value of the expression. +Different instances of the same function +can refer to different external local variables +and can have different environment tables. + + +

+Parameters act as local variables that are +initialized with the argument values: + +

+	parlist ::= namelist [`,´ `...´] | `...´
+

+When a function is called, +the list of arguments is adjusted to +the length of the list of parameters, +unless the function is a variadic or vararg function, +which is +indicated by three dots ('...') at the end of its parameter list. +A vararg function does not adjust its argument list; +instead, it collects all extra arguments and supplies them +to the function through a vararg expression, +which is also written as three dots. +The value of this expression is a list of all actual extra arguments, +similar to a function with multiple results. +If a vararg expression is used inside another expression +or in the middle of a list of expressions, +then its return list is adjusted to one element. +If the expression is used as the last element of a list of expressions, +then no adjustment is made +(unless that last expression is enclosed in parentheses). + + +

+As an example, consider the following definitions: + +

+     function f(a, b) end
+     function g(a, b, ...) end
+     function r() return 1,2,3 end
+

+Then, we have the following mapping from arguments to parameters and +to the vararg expression: + +

+     CALL            PARAMETERS
+     
+     f(3)             a=3, b=nil
+     f(3, 4)          a=3, b=4
+     f(3, 4, 5)       a=3, b=4
+     f(r(), 10)       a=1, b=10
+     f(r())           a=1, b=2
+     
+     g(3)             a=3, b=nil, ... -->  (nothing)
+     g(3, 4)          a=3, b=4,   ... -->  (nothing)
+     g(3, 4, 5, 8)    a=3, b=4,   ... -->  5  8
+     g(5, r())        a=5, b=1,   ... -->  2  3
+
+ +

+Results are returned using the return statement (see §2.4.4). +If control reaches the end of a function +without encountering a return statement, +then the function returns with no results. + + +

+The colon syntax +is used for defining methods, +that is, functions that have an implicit extra parameter self. +Thus, the statement + +

+     function t.a.b.c:f (params) body end
+

+is syntactic sugar for + +

+     t.a.b.c.f = function (self, params) body end
+
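
+A small illustration (the table name Account is arbitrary):

+     Account = {balance = 0}
+     function Account:deposit (v)
+       self.balance = self.balance + v
+     end
+     Account:deposit(100)          -- same as Account.deposit(Account, 100)
+     print(Account.balance)        --> 100
+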
+ + + + + + +

2.6 - Visibility Rules

+ +

+ +Lua is a lexically scoped language. +The scope of variables begins at the first statement after +their declaration and lasts until the end of the innermost block that +includes the declaration. +Consider the following example: + +

+     x = 10                -- global variable
+     do                    -- new block
+       local x = x         -- new 'x', with value 10
+       print(x)            --> 10
+       x = x+1
+       do                  -- another block
+         local x = x+1     -- another 'x'
+         print(x)          --> 12
+       end
+       print(x)            --> 11
+     end
+     print(x)              --> 10  (the global one)
+
+ +

+Notice that, in a declaration like local x = x, +the new x being declared is not in scope yet, +and so the second x refers to the outside variable. + + +

+Because of the lexical scoping rules, +local variables can be freely accessed by functions +defined inside their scope. +A local variable used by an inner function is called +an upvalue, or external local variable, +inside the inner function. + + +

+Notice that each execution of a local statement +defines new local variables. +Consider the following example: + +

+     a = {}
+     local x = 20
+     for i=1,10 do
+       local y = 0
+       a[i] = function () y=y+1; return x+y end
+     end
+

+The loop creates ten closures +(that is, ten instances of the anonymous function). +Each of these closures uses a different y variable, +while all of them share the same x. + + + + + +

2.7 - Error Handling

+ +

+Because Lua is an embedded extension language, +all Lua actions start from C code in the host program +calling a function from the Lua library (see lua_pcall). +Whenever an error occurs during Lua compilation or execution, +control returns to C, +which can take appropriate measures +(such as printing an error message). + + +

+Lua code can explicitly generate an error by calling the +error function. +If you need to catch errors in Lua, +you can use the pcall function. + + + + + +
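
+A minimal illustration of catching an error with pcall:

+     local ok, msg = pcall(function ()
+       error("something failed")
+     end)
+     print(ok)      --> false
+     print(msg)     -- the error message, with position information added by error
+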

2.8 - Metatables

+ +

+Every value in Lua can have a metatable. +This metatable is an ordinary Lua table +that defines the behavior of the original value +under certain special operations. +You can change several aspects of the behavior +of operations over a value by setting specific fields in its metatable. +For instance, when a non-numeric value is the operand of an addition, +Lua checks for a function in the field "__add" in its metatable. +If it finds one, +Lua calls this function to perform the addition. + + +

+We call the keys in a metatable events +and the values metamethods. +In the previous example, the event is "add" +and the metamethod is the function that performs the addition. + + +

+You can query the metatable of any value +through the getmetatable function. + + +

+You can replace the metatable of tables +through the setmetatable +function. +You cannot change the metatable of other types from Lua +(except by using the debug library); +you must use the C API for that. + + +

+Tables and full userdata have individual metatables +(although multiple tables and userdata can share their metatables). +Values of all other types share one single metatable per type; +that is, there is one single metatable for all numbers, +one for all strings, etc. + + +

+A metatable controls how an object behaves in arithmetic operations, +order comparisons, concatenation, length operation, and indexing. +A metatable also can define a function to be called when a userdata +is garbage collected. +For each of these operations Lua associates a specific key +called an event. +When Lua performs one of these operations over a value, +it checks whether this value has a metatable with the corresponding event. +If so, the value associated with that key (the metamethod) +controls how Lua will perform the operation. + + +

+Metatables control the operations listed next. +Each operation is identified by its corresponding name. +The key for each operation is a string with its name prefixed by +two underscores, '__'; +for instance, the key for operation "add" is the +string "__add". +The semantics of these operations is better explained by a Lua function +describing how the interpreter executes the operation. + + +

+The code shown here in Lua is only illustrative; +the real behavior is hard coded in the interpreter +and it is much more efficient than this simulation. +All functions used in these descriptions +(rawget, tonumber, etc.) +are described in §5.1. +In particular, to retrieve the metamethod of a given object, +we use the expression + +

+     metatable(obj)[event]
+

+This should be read as + +

+     rawget(getmetatable(obj) or {}, event)
+

+ +That is, the access to a metamethod does not invoke other metamethods, +and the access to objects with no metatables does not fail +(it simply results in nil). + + + +

    + +
  • "add": +the + operation. + + + +

    +The function getbinhandler below defines how Lua chooses a handler +for a binary operation. +First, Lua tries the first operand. +If its type does not define a handler for the operation, +then Lua tries the second operand. + +

    +     function getbinhandler (op1, op2, event)
    +       return metatable(op1)[event] or metatable(op2)[event]
    +     end
    +

    +By using this function, +the behavior of the op1 + op2 is + +

    +     function add_event (op1, op2)
    +       local o1, o2 = tonumber(op1), tonumber(op2)
    +       if o1 and o2 then  -- both operands are numeric?
    +         return o1 + o2   -- '+' here is the primitive 'add'
    +       else  -- at least one of the operands is not numeric
    +         local h = getbinhandler(op1, op2, "__add")
    +         if h then
    +           -- call the handler with both operands
    +           return (h(op1, op2))
    +         else  -- no handler available: default behavior
    +           error(···)
    +         end
    +       end
    +     end
    +

    +

  • + +
  • "sub": +the - operation. + +Behavior similar to the "add" operation. +
  • + +
  • "mul": +the * operation. + +Behavior similar to the "add" operation. +
  • + +
  • "div": +the / operation. + +Behavior similar to the "add" operation. +
  • + +
  • "mod": +the % operation. + +Behavior similar to the "add" operation, +with the operation +o1 - floor(o1/o2)*o2 as the primitive operation. +
  • + +
  • "pow": +the ^ (exponentiation) operation. + +Behavior similar to the "add" operation, +with the function pow (from the C math library) +as the primitive operation. +
  • + +
  • "unm": +the unary - operation. + + +
    +     function unm_event (op)
    +       local o = tonumber(op)
    +       if o then  -- operand is numeric?
    +         return -o  -- '-' here is the primitive 'unm'
    +       else  -- the operand is not numeric.
    +         -- Try to get a handler from the operand
    +         local h = metatable(op).__unm
    +         if h then
    +           -- call the handler with the operand
    +           return (h(op))
    +         else  -- no handler available: default behavior
    +           error(···)
    +         end
    +       end
    +     end
    +

    +

  • + +
  • "concat": +the .. (concatenation) operation. + + +
    +     function concat_event (op1, op2)
    +       if (type(op1) == "string" or type(op1) == "number") and
    +          (type(op2) == "string" or type(op2) == "number") then
    +         return op1 .. op2  -- primitive string concatenation
    +       else
    +         local h = getbinhandler(op1, op2, "__concat")
    +         if h then
    +           return (h(op1, op2))
    +         else
    +           error(···)
    +         end
    +       end
    +     end
    +

    +

  • + +
  • "len": +the # operation. + + +
    +     function len_event (op)
    +       if type(op) == "string" then
    +         return strlen(op)         -- primitive string length
    +       elseif type(op) == "table" then
    +         return #op                -- primitive table length
    +       else
    +         local h = metatable(op).__len
    +         if h then
    +           -- call the handler with the operand
    +           return (h(op))
    +         else  -- no handler available: default behavior
    +           error(···)
    +         end
    +       end
    +     end
    +

    +See §2.5.5 for a description of the length of a table. +

  • + +
  • "eq": +the == operation. + +The function getcomphandler defines how Lua chooses a metamethod +for comparison operators. +A metamethod only is selected when both objects +being compared have the same type +and the same metamethod for the selected operation. + +
    +     function getcomphandler (op1, op2, event)
    +       if type(op1) ~= type(op2) then return nil end
    +       local mm1 = metatable(op1)[event]
    +       local mm2 = metatable(op2)[event]
    +       if mm1 == mm2 then return mm1 else return nil end
    +     end
    +

    +The "eq" event is defined as follows: + +

    +     function eq_event (op1, op2)
    +       if type(op1) ~= type(op2) then  -- different types?
    +         return false   -- different objects
    +       end
    +       if op1 == op2 then   -- primitive equal?
    +         return true   -- objects are equal
    +       end
    +       -- try metamethod
    +       local h = getcomphandler(op1, op2, "__eq")
    +       if h then
    +         return (h(op1, op2))
    +       else
    +         return false
    +       end
    +     end
    +

    +a ~= b is equivalent to not (a == b). +

  • + +
  • "lt": +the < operation. + + +
    +     function lt_event (op1, op2)
    +       if type(op1) == "number" and type(op2) == "number" then
    +         return op1 < op2   -- numeric comparison
    +       elseif type(op1) == "string" and type(op2) == "string" then
    +         return op1 < op2   -- lexicographic comparison
    +       else
    +         local h = getcomphandler(op1, op2, "__lt")
    +         if h then
    +           return (h(op1, op2))
    +         else
    +           error(···)
    +         end
    +       end
    +     end
    +

    +a > b is equivalent to b < a. +

  • + +
  • "le": +the <= operation. + + +
    +     function le_event (op1, op2)
    +       if type(op1) == "number" and type(op2) == "number" then
    +         return op1 <= op2   -- numeric comparison
    +       elseif type(op1) == "string" and type(op2) == "string" then
    +         return op1 <= op2   -- lexicographic comparison
    +       else
    +         local h = getcomphandler(op1, op2, "__le")
    +         if h then
    +           return (h(op1, op2))
    +         else
    +           h = getcomphandler(op1, op2, "__lt")
    +           if h then
    +             return not h(op2, op1)
    +           else
    +             error(···)
    +           end
    +         end
    +       end
    +     end
    +

    +a >= b is equivalent to b <= a. +Note that, in the absence of a "le" metamethod, +Lua tries the "lt", assuming that a <= b is +equivalent to not (b < a). +

  • + +
  • "index": +The indexing access table[key]. + + +
    +     function gettable_event (table, key)
    +       local h
    +       if type(table) == "table" then
    +         local v = rawget(table, key)
    +         if v ~= nil then return v end
    +         h = metatable(table).__index
    +         if h == nil then return nil end
    +       else
    +         h = metatable(table).__index
    +         if h == nil then
    +           error(···)
    +         end
    +       end
    +       if type(h) == "function" then
    +         return (h(table, key))     -- call the handler
    +       else return h[key]           -- or repeat operation on it
    +       end
    +     end
    +

    +

  • + +
  • "newindex": +The indexing assignment table[key] = value. + + +
    +     function settable_event (table, key, value)
    +       local h
    +       if type(table) == "table" then
    +         local v = rawget(table, key)
    +         if v ~= nil then rawset(table, key, value); return end
    +         h = metatable(table).__newindex
    +         if h == nil then rawset(table, key, value); return end
    +       else
    +         h = metatable(table).__newindex
    +         if h == nil then
    +           error(···)
    +         end
    +       end
    +       if type(h) == "function" then
    +         h(table, key,value)           -- call the handler
    +       else h[key] = value             -- or repeat operation on it
    +       end
    +     end
    +

    +

  • + +
  • "call": +called when Lua calls a value. + + +
    +     function function_event (func, ...)
    +       if type(func) == "function" then
    +         return func(...)   -- primitive call
    +       else
    +         local h = metatable(func).__call
    +         if h then
    +           return h(func, ...)
    +         else
    +           error(···)
    +         end
    +       end
    +     end
    +

    +

  • + +
+ + + + +
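
+As a small sketch of these mechanisms, the following code (the name mt is arbitrary) defines addition between two tables through the "__add" metamethod:

+     local mt = {}
+     mt.__add = function (a, b)
+       return setmetatable({value = a.value + b.value}, mt)
+     end
+     local x = setmetatable({value = 1}, mt)
+     local y = setmetatable({value = 10}, mt)
+     local z = x + y               -- no primitive addition applies, so mt.__add is called
+     print(z.value)                --> 11
+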

2.9 - Environments

+ +

+Besides metatables, +objects of types thread, function, and userdata +have another table associated with them, +called their environment. +Like metatables, environments are regular tables and +multiple objects can share the same environment. + + +

+Threads are created sharing the environment of the creating thread. +Userdata and C functions are created sharing the environment +of the creating C function. +Non-nested Lua functions +(created by loadfile, loadstring or load) +are created sharing the environment of the creating thread. +Nested Lua functions are created sharing the environment of +the creating Lua function. + + +

+Environments associated with userdata have no meaning for Lua. +It is only a convenience feature for programmers to associate a table to +a userdata. + + +

+Environments associated with threads are called +global environments. +They are used as the default environment for threads and +non-nested Lua functions created by the thread +and can be directly accessed by C code (see §3.3). + + +

+The environment associated with a C function can be directly +accessed by C code (see §3.3). +It is used as the default environment for other C functions +and userdata created by the function. + + +

+Environments associated with Lua functions are used to resolve +all accesses to global variables within the function (see §2.3). +They are used as the default environment for nested Lua functions +created by the function. + + +

+You can change the environment of a Lua function or the +running thread by calling setfenv. +You can get the environment of a Lua function or the running thread +by calling getfenv. +To manipulate the environment of other objects +(userdata, C functions, other threads) you must +use the C API. + + + + + +
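
+A minimal illustration (assuming no global x is otherwise defined):

+     local function f () return x end
+     print(f())            --> nil   (x is looked up in the default environment)
+     setfenv(f, {x = 42})  -- give f a fresh environment table
+     print(f())            --> 42
+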

2.10 - Garbage Collection

+ +

+Lua performs automatic memory management. +This means that +you have to worry neither about allocating memory for new objects +nor about freeing it when the objects are no longer needed. +Lua manages memory automatically by running +a garbage collector from time to time +to collect all dead objects +(that is, objects that are no longer accessible from Lua). +All memory used by Lua is subject to automatic management: +tables, userdata, functions, threads, strings, etc. + + +

+Lua implements an incremental mark-and-sweep collector. +It uses two numbers to control its garbage-collection cycles: +the garbage-collector pause and +the garbage-collector step multiplier. +Both use percentage points as units +(so that a value of 100 means an internal value of 1). + + +

+The garbage-collector pause +controls how long the collector waits before starting a new cycle. +Larger values make the collector less aggressive. +Values smaller than 100 mean the collector will not wait to +start a new cycle. +A value of 200 means that the collector waits for the total memory in use +to double before starting a new cycle. + + +

+The step multiplier +controls the speed of the collector relative to +memory allocation. +Larger values make the collector more aggressive but also increase +the size of each incremental step. +Values smaller than 100 make the collector too slow and +can result in the collector never finishing a cycle. +The default, 200, means that the collector runs at "twice" +the speed of memory allocation. + +

+You can change these numbers by calling lua_gc in C +or collectgarbage in Lua. +With these functions you can also control +the collector directly (e.g., stop and restart it). + + + +
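
+Some illustrative calls from Lua, using option names accepted by collectgarbage:

+     collectgarbage("setpause", 200)     -- wait until memory in use doubles before a new cycle
+     collectgarbage("setstepmul", 200)   -- run the collector at "twice" the allocation speed
+     collectgarbage("collect")           -- force a full garbage-collection cycle
+     print(collectgarbage("count"))      -- total memory in use by Lua, in Kbytes
+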

2.10.1 - Garbage-Collection Metamethods

+ +

+Using the C API, +you can set garbage-collector metamethods for userdata (see §2.8). +These metamethods are also called finalizers. +Finalizers allow you to coordinate Lua's garbage collection +with external resource management +(such as closing files, network or database connections, +or freeing your own memory). + + +

+Garbage userdata with a field __gc in their metatables are not +collected immediately by the garbage collector. +Instead, Lua puts them in a list. +After the collection, +Lua does the equivalent of the following function +for each userdata in that list: + +

+     function gc_event (udata)
+       local h = metatable(udata).__gc
+       if h then
+         h(udata)
+       end
+     end
+
+ +

+At the end of each garbage-collection cycle, +the finalizers for userdata are called in reverse +order of their creation, +among those collected in that cycle. +That is, the first finalizer to be called is the one associated +with the userdata created last in the program. +The userdata itself is freed only in the next garbage-collection cycle. + + + + + +

2.10.2 - Weak Tables

+ +

+A weak table is a table whose elements are +weak references. +A weak reference is ignored by the garbage collector. +In other words, +if the only references to an object are weak references, +then the garbage collector will collect this object. + + +

+A weak table can have weak keys, weak values, or both. +A table with weak keys allows the collection of its keys, +but prevents the collection of its values. +A table with both weak keys and weak values allows the collection of +both keys and values. +In any case, if either the key or the value is collected, +the whole pair is removed from the table. +The weakness of a table is controlled by the +__mode field of its metatable. +If the __mode field is a string containing the character 'k', +the keys in the table are weak. +If __mode contains 'v', +the values in the table are weak. + + +

+After you use a table as a metatable, +you should not change the value of its __mode field. +Otherwise, the weak behavior of the tables controlled by this +metatable is undefined. + + + + + + + +
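
+A small illustration of a table with weak keys (the name cache is arbitrary):

+     local cache = setmetatable({}, {__mode = "k"})   -- keys are weak
+     local key = {}
+     cache[key] = "associated data"
+     key = nil                 -- once the key is otherwise unreachable, the pair may be removed
+     collectgarbage("collect")
+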

2.11 - Coroutines

+ +

+Lua supports coroutines, +also called collaborative multithreading. +A coroutine in Lua represents an independent thread of execution. +Unlike threads in multithread systems, however, +a coroutine only suspends its execution by explicitly calling +a yield function. + + +

+You create a coroutine with a call to coroutine.create. +Its sole argument is a function +that is the main function of the coroutine. +The create function only creates a new coroutine and +returns a handle to it (an object of type thread); +it does not start the coroutine execution. + + +

+When you first call coroutine.resume, +passing as its first argument +a thread returned by coroutine.create, +the coroutine starts its execution, +at the first line of its main function. +Extra arguments passed to coroutine.resume are passed on +to the coroutine main function. +After the coroutine starts running, +it runs until it terminates or yields. + + +

+A coroutine can terminate its execution in two ways: +normally, when its main function returns +(explicitly or implicitly, after the last instruction); +and abnormally, if there is an unprotected error. +In the first case, coroutine.resume returns true, +plus any values returned by the coroutine main function. +In case of errors, coroutine.resume returns false +plus an error message. + + +

+A coroutine yields by calling coroutine.yield. +When a coroutine yields, +the corresponding coroutine.resume returns immediately, +even if the yield happens inside nested function calls +(that is, not in the main function, +but in a function directly or indirectly called by the main function). +In the case of a yield, coroutine.resume also returns true, +plus any values passed to coroutine.yield. +The next time you resume the same coroutine, +it continues its execution from the point where it yielded, +with the call to coroutine.yield returning any extra +arguments passed to coroutine.resume. + + +

+Like coroutine.create, +the coroutine.wrap function also creates a coroutine, +but instead of returning the coroutine itself, +it returns a function that, when called, resumes the coroutine. +Any arguments passed to this function +go as extra arguments to coroutine.resume. +coroutine.wrap returns all the values returned by coroutine.resume, +except the first one (the boolean error code). +Unlike coroutine.resume, +coroutine.wrap does not catch errors; +any error is propagated to the caller. + + +

+As an example, +consider the following code: + +

+     function foo (a)
+       print("foo", a)
+       return coroutine.yield(2*a)
+     end
+     
+     co = coroutine.create(function (a,b)
+           print("co-body", a, b)
+           local r = foo(a+1)
+           print("co-body", r)
+           local r, s = coroutine.yield(a+b, a-b)
+           print("co-body", r, s)
+           return b, "end"
+     end)
+            
+     print("main", coroutine.resume(co, 1, 10))
+     print("main", coroutine.resume(co, "r"))
+     print("main", coroutine.resume(co, "x", "y"))
+     print("main", coroutine.resume(co, "x", "y"))
+

+When you run it, it produces the following output: + +

+     co-body 1       10
+     foo     2
+     
+     main    true    4
+     co-body r
+     main    true    11      -9
+     co-body x       y
+     main    true    10      end
+     main    false   cannot resume dead coroutine
+
+ + + + +
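
+For comparison, a minimal illustration of coroutine.wrap (the name gen is arbitrary):

+     local gen = coroutine.wrap(function (n)
+       for i = 1, n do coroutine.yield(i) end
+     end)
+     print(gen(3))     --> 1   (the first call starts the coroutine, passing 3 to its main function)
+     print(gen())      --> 2
+     print(gen())      --> 3
+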

3 - The Application Program Interface

+ +

+ +This section describes the C API for Lua, that is, +the set of C functions available to the host program to communicate +with Lua. +All API functions and related types and constants +are declared in the header file lua.h. + + +

+Even when we use the term "function", +any facility in the API may be provided as a macro instead. +All such macros use each of their arguments exactly once +(except for the first argument, which is always a Lua state), +and so do not generate any hidden side-effects. + + +

+As in most C libraries, +the Lua API functions do not check their arguments for validity or consistency. +However, you can change this behavior by compiling Lua +with a proper definition for the macro luai_apicheck, +in file luaconf.h. + + + +

3.1 - The Stack

+ +

+Lua uses a virtual stack to pass values to and from C. +Each element in this stack represents a Lua value +(nil, number, string, etc.). + + +

+Whenever Lua calls C, the called function gets a new stack, +which is independent of previous stacks and of stacks of +C functions that are still active. +This stack initially contains any arguments to the C function +and it is where the C function pushes its results +to be returned to the caller (see lua_CFunction). + + +

+For convenience, +most query operations in the API do not follow a strict stack discipline. +Instead, they can refer to any element in the stack +by using an index: +A positive index represents an absolute stack position +(starting at 1); +a negative index represents an offset relative to the top of the stack. +More specifically, if the stack has n elements, +then index 1 represents the first element +(that is, the element that was pushed onto the stack first) +and +index n represents the last element; +index -1 also represents the last element +(that is, the element at the top) +and index -n represents the first element. +We say that an index is valid +if it lies between 1 and the stack top +(that is, if 1 ≤ abs(index) ≤ top). + + + + + + +

3.2 - Stack Size

+ +

+When you interact with the Lua API, +you are responsible for ensuring consistency. +In particular, +you are responsible for controlling stack overflow. +You can use the function lua_checkstack +to grow the stack size. + +

+Whenever Lua calls C, +it ensures that at least LUA_MINSTACK stack positions are available. +LUA_MINSTACK is defined as 20, +so that usually you do not have to worry about stack space +unless your code has loops pushing elements onto the stack. + + +

+Most query functions accept as indices any value inside the +available stack space, that is, indices up to the maximum stack size +you have set through lua_checkstack. +Such indices are called acceptable indices. +More formally, we define an acceptable index +as follows: + +

+     (index < 0 && abs(index) <= top) ||
+     (index > 0 && index <= stackspace)
+

+Note that 0 is never an acceptable index. + + + + + +

3.3 - Pseudo-Indices

+ +

+Unless otherwise noted, +any function that accepts valid indices can also be called with +pseudo-indices, +which represent some Lua values that are accessible to C code +but which are not in the stack. +Pseudo-indices are used to access the thread environment, +the function environment, +the registry, +and the upvalues of a C function (see §3.4). + + +

+The thread environment (where global variables live) is +always at pseudo-index LUA_GLOBALSINDEX. +The environment of the running C function is always +at pseudo-index LUA_ENVIRONINDEX. + + +

+To access and change the value of global variables, +you can use regular table operations over an environment table. +For instance, to access the value of a global variable, do + +

+     lua_getfield(L, LUA_GLOBALSINDEX, varname);
+
+ + + + +

3.4 - C Closures

+ +

+When a C function is created, +it is possible to associate some values with it, +thus creating a C closure; +these values are called upvalues and are +accessible to the function whenever it is called +(see lua_pushcclosure). + + +

+Whenever a C function is called, +its upvalues are located at specific pseudo-indices. +These pseudo-indices are produced by the macro +lua_upvalueindex. +The first value associated with a function is at position +lua_upvalueindex(1), and so on. +Any access to lua_upvalueindex(n), +where n is greater than the number of upvalues of the +current function (but not greater than 256), +produces an acceptable (but invalid) index. + + + + + +
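
+As an illustrative sketch of this mechanism (the function name counter and the global name are hypothetical, following the classic counter pattern), a C closure can keep its state in an upvalue: + 

+     static int counter (lua_State *L) {
+       lua_Number n = lua_tonumber(L, lua_upvalueindex(1));
+       lua_pushnumber(L, n + 1);               /* the new count        */
+       lua_pushvalue(L, -1);                   /* duplicate it         */
+       lua_replace(L, lua_upvalueindex(1));    /* update the upvalue   */
+       return 1;                               /* return the new count */
+     }
+
+     /* creation: push the initial count, then close over it */
+     lua_pushnumber(L, 0);
+     lua_pushcclosure(L, counter, 1);
+     lua_setglobal(L, "counter");
+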

3.5 - Registry

+ +

+Lua provides a registry, +a pre-defined table that can be used by any C code to +store whatever Lua value it needs to store. +This table is always located at pseudo-index +LUA_REGISTRYINDEX. +Any C library can store data into this table, +but it should take care to choose keys different from those used +by other libraries, to avoid collisions. +Typically, you should use as key a string containing your library name +or a light userdata with the address of a C object in your code. + + +
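
+As a minimal sketch (the key name "mylib" is hypothetical), a library might keep a private table in the registry like this: + 

+     /* store a private table under a string key */
+     lua_newtable(L);
+     lua_setfield(L, LUA_REGISTRYINDEX, "mylib");
+
+     /* retrieve it later */
+     lua_getfield(L, LUA_REGISTRYINDEX, "mylib");
+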

+The integer keys in the registry are used by the reference mechanism, +implemented by the auxiliary library, +and therefore should not be used for other purposes. + + + + + +

3.6 - Error Handling in C

+ +

+Internally, Lua uses the C longjmp facility to handle errors. +(You can also choose to use exceptions if you use C++; +see file luaconf.h.) +When Lua faces any error +(such as memory allocation errors, type errors, syntax errors, +and runtime errors), +it raises an error; +that is, it does a long jump. +A protected environment uses setjmp +to set a recovery point; +any error jumps to the most recent active recovery point. + + +

+Most functions in the API can throw an error, +for instance due to a memory allocation error. +The documentation for each function indicates whether +it can throw errors. + + +

+Inside a C function you can throw an error by calling lua_error. + + + + + +
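
+A minimal sketch of raising an error from C (the function is hypothetical); note that lua_error never returns, so the return statement is only an idiom: + 

+     static int positive_only (lua_State *L) {
+       if (lua_tonumber(L, 1) <= 0) {
+         lua_pushstring(L, "argument must be positive");
+         return lua_error(L);            /* does a long jump */
+       }
+       lua_pushnumber(L, lua_tonumber(L, 1));
+       return 1;
+     }
+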

3.7 - Functions and Types

+ +

+Here we list all functions and types from the C API in +alphabetical order. +Each function has an indicator like this: +[-o, +p, x] + + +

+The first field, o, +is how many elements the function pops from the stack. +The second field, p, +is how many elements the function pushes onto the stack. +(Any function always pushes its results after popping its arguments.) +A field in the form x|y means the function can push (or pop) +x or y elements, +depending on the situation; +a question mark '?' means that +we cannot know how many elements the function pops/pushes +by looking only at its arguments +(e.g., they may depend on what is on the stack). +The third field, x, +tells whether the function may throw errors: +'-' means the function never throws any error; +'m' means the function may throw an error +only due to a lack of memory; +'e' means the function may throw other kinds of errors; +'v' means the function may throw an error on purpose. + + +


lua_Alloc

+
typedef void * (*lua_Alloc) (void *ud,
+                             void *ptr,
+                             size_t osize,
+                             size_t nsize);
+ +

+The type of the memory-allocation function used by Lua states. +The allocator function must provide a +functionality similar to realloc, +but not exactly the same. +Its arguments are +ud, an opaque pointer passed to lua_newstate; +ptr, a pointer to the block being allocated/reallocated/freed; +osize, the original size of the block; +nsize, the new size of the block. +ptr is NULL if and only if osize is zero. +When nsize is zero, the allocator must return NULL; +if osize is not zero, +it should free the block pointed to by ptr. +When nsize is not zero, the allocator returns NULL +if and only if it cannot fill the request. +When nsize is not zero and osize is zero, +the allocator should behave like malloc. +When nsize and osize are not zero, +the allocator behaves like realloc. +Lua assumes that the allocator never fails when +osize >= nsize. + + +

+Here is a simple implementation for the allocator function. +It is used in the auxiliary library by luaL_newstate. + +

+     static void *l_alloc (void *ud, void *ptr, size_t osize,
+                                                size_t nsize) {
+       (void)ud;  (void)osize;  /* not used */
+       if (nsize == 0) {
+         free(ptr);
+         return NULL;
+       }
+       else
+         return realloc(ptr, nsize);
+     }
+

+This code assumes +that free(NULL) has no effect and that +realloc(NULL, size) is equivalent to malloc(size). +ANSI C ensures both behaviors. + + + + + +


lua_atpanic

+[-0, +0, -] +

lua_CFunction lua_atpanic (lua_State *L, lua_CFunction panicf);
+ +

+Sets a new panic function and returns the old one. + + +

+If an error happens outside any protected environment, +Lua calls a panic function +and then calls exit(EXIT_FAILURE), +thus exiting the host application. +Your panic function can avoid this exit by +never returning (e.g., doing a long jump). + + +

+The panic function can access the error message at the top of the stack. + + + + + +
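
+A minimal sketch of a replacement panic function (the name my_panic is hypothetical; it assumes <stdio.h> and <stdlib.h>): + 

+     static int my_panic (lua_State *L) {
+       fprintf(stderr, "lua panic: %s\n", lua_tostring(L, -1));
+       abort();                      /* never return control to Lua */
+       return 0;                     /* unreachable */
+     }
+
+     /* installation, typically right after creating the state */
+     lua_atpanic(L, my_panic);
+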


lua_call

+[-(nargs + 1), +nresults, e] +

void lua_call (lua_State *L, int nargs, int nresults);
+ +

+Calls a function. + + +

+To call a function you must use the following protocol: +first, the function to be called is pushed onto the stack; +then, the arguments to the function are pushed +in direct order; +that is, the first argument is pushed first. +Finally you call lua_call; +nargs is the number of arguments that you pushed onto the stack. +All arguments and the function value are popped from the stack +when the function is called. +The function results are pushed onto the stack when the function returns. +The number of results is adjusted to nresults, +unless nresults is LUA_MULTRET. +In this case, all results from the function are pushed. +Lua takes care that the returned values fit into the stack space. +The function results are pushed onto the stack in direct order +(the first result is pushed first), +so that after the call the last result is on the top of the stack. + + +

+Any error inside the called function is propagated upwards +(with a longjmp). + + +

+The following example shows how the host program can do the +equivalent to this Lua code: + +

+     a = f("how", t.x, 14)
+

+Here it is in C: + +

+     lua_getfield(L, LUA_GLOBALSINDEX, "f"); /* function to be called */
+     lua_pushstring(L, "how");                        /* 1st argument */
+     lua_getfield(L, LUA_GLOBALSINDEX, "t");   /* table to be indexed */
+     lua_getfield(L, -1, "x");        /* push result of t.x (2nd arg) */
+     lua_remove(L, -2);                  /* remove 't' from the stack */
+     lua_pushinteger(L, 14);                          /* 3rd argument */
+     lua_call(L, 3, 1);     /* call 'f' with 3 arguments and 1 result */
+     lua_setfield(L, LUA_GLOBALSINDEX, "a");        /* set global 'a' */
+

+Note that the code above is "balanced": +at its end, the stack is back to its original configuration. +This is considered good programming practice. + + + + + +


lua_CFunction

+
typedef int (*lua_CFunction) (lua_State *L);
+ +

+Type for C functions. + + +

+In order to communicate properly with Lua, +a C function must use the following protocol, +which defines the way parameters and results are passed: +a C function receives its arguments from Lua in its stack +in direct order (the first argument is pushed first). +So, when the function starts, +lua_gettop(L) returns the number of arguments received by the function. +The first argument (if any) is at index 1 +and its last argument is at index lua_gettop(L). +To return values to Lua, a C function just pushes them onto the stack, +in direct order (the first result is pushed first), +and returns the number of results. +Any other value in the stack below the results will be properly +discarded by Lua. +Like a Lua function, a C function called by Lua can also return +many results. + + +

+As an example, the following function receives a variable number +of numerical arguments and returns their average and sum: + +

+     static int foo (lua_State *L) {
+       int n = lua_gettop(L);    /* number of arguments */
+       lua_Number sum = 0;
+       int i;
+       for (i = 1; i <= n; i++) {
+         if (!lua_isnumber(L, i)) {
+           lua_pushstring(L, "incorrect argument");
+           lua_error(L);
+         }
+         sum += lua_tonumber(L, i);
+       }
+       lua_pushnumber(L, sum/n);        /* first result */
+       lua_pushnumber(L, sum);         /* second result */
+       return 2;                   /* number of results */
+     }
+
+ + + + +

lua_checkstack

+[-0, +0, m] +

int lua_checkstack (lua_State *L, int extra);
+ +

+Ensures that there are at least extra free stack slots in the stack. +It returns false if it cannot grow the stack to that size. +This function never shrinks the stack; +if the stack is already larger than the new size, +it is left unchanged. + + + + + +


lua_close

+[-0, +0, -] +

void lua_close (lua_State *L);
+ +

+Destroys all objects in the given Lua state +(calling the corresponding garbage-collection metamethods, if any) +and frees all dynamic memory used by this state. +On several platforms, you may not need to call this function, +because all resources are naturally released when the host program ends. +On the other hand, long-running programs, +such as a daemon or a web server, +might need to release states as soon as they are not needed, +to avoid growing too large. + + + + + +


lua_concat

+[-n, +1, e] +

void lua_concat (lua_State *L, int n);
+ +

+Concatenates the n values at the top of the stack, +pops them, and leaves the result at the top. +If n is 1, the result is the single value on the stack +(that is, the function does nothing); +if n is 0, the result is the empty string. +Concatenation is performed following the usual semantics of Lua +(see §2.5.4). + + + + + +


lua_cpcall

+[-0, +(0|1), -] +

int lua_cpcall (lua_State *L, lua_CFunction func, void *ud);
+ +

+Calls the C function func in protected mode. +func starts with only one element in its stack, +a light userdata containing ud. +In case of errors, +lua_cpcall returns the same error codes as lua_pcall, +plus the error object on the top of the stack; +otherwise, it returns zero, and does not change the stack. +All values returned by func are discarded. + + + + + +
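
+A minimal sketch (the function name and message are hypothetical; it assumes <stdio.h>): + 

+     static int protected_main (lua_State *L) {
+       const char *msg = (const char *)lua_touserdata(L, 1);
+       lua_getglobal(L, "print");
+       lua_pushstring(L, msg);
+       lua_call(L, 1, 0);         /* any error here is caught by lua_cpcall */
+       return 0;
+     }
+
+     static char message[] = "hello from C";
+     /* ... */
+     if (lua_cpcall(L, protected_main, message) != 0) {
+       fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
+       lua_pop(L, 1);             /* remove the error object */
+     }
+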


lua_createtable

+[-0, +1, m] +

void lua_createtable (lua_State *L, int narr, int nrec);
+ +

+Creates a new empty table and pushes it onto the stack. +The new table has space pre-allocated +for narr array elements and nrec non-array elements. +This pre-allocation is useful when you know exactly how many elements +the table will have. +Otherwise you can use the function lua_newtable. + + + + + +


lua_dump

+[-0, +0, m] +

int lua_dump (lua_State *L, lua_Writer writer, void *data);
+ +

+Dumps a function as a binary chunk. +Receives a Lua function on the top of the stack +and produces a binary chunk that, +if loaded again, +results in a function equivalent to the one dumped. +As it produces parts of the chunk, +lua_dump calls function writer (see lua_Writer) +with the given data +to write them. + + +

+The value returned is the error code returned by the last +call to the writer; +0 means no errors. + + +

+This function does not pop the Lua function from the stack. + + + + + +


lua_equal

+[-0, +0, e] +

int lua_equal (lua_State *L, int index1, int index2);
+ +

+Returns 1 if the two values in acceptable indices index1 and +index2 are equal, +following the semantics of the Lua == operator +(that is, may call metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices is not valid. + + + + +


lua_error

+[-1, +0, v] +

int lua_error (lua_State *L);
+ +

+Generates a Lua error. +The error message (which can actually be a Lua value of any type) +must be on the stack top. +This function does a long jump, +and therefore never returns. +(see luaL_error). + + + + + +


lua_gc

+[-0, +0, e] +

int lua_gc (lua_State *L, int what, int data);
+ +

+Controls the garbage collector. + + +

+This function performs several tasks, +according to the value of the parameter what: + +

    + +
  • LUA_GCSTOP: +stops the garbage collector. +
  • + +
  • LUA_GCRESTART: +restarts the garbage collector. +
  • + +
  • LUA_GCCOLLECT: +performs a full garbage-collection cycle. +
  • + +
  • LUA_GCCOUNT: +returns the current amount of memory (in Kbytes) in use by Lua. +
  • + +
  • LUA_GCCOUNTB: +returns the remainder of dividing the current amount of bytes of +memory in use by Lua by 1024. +
  • + +
  • LUA_GCSTEP: +performs an incremental step of garbage collection. +The step "size" is controlled by data +(larger values mean more steps) in a non-specified way. +If you want to control the step size +you must experimentally tune the value of data. +The function returns 1 if the step finished a +garbage-collection cycle. +
  • + +
  • LUA_GCSETPAUSE: +sets data as the new value +for the pause of the collector (see §2.10). +The function returns the previous value of the pause. +
  • + +
  • LUA_GCSETSTEPMUL: +sets data as the new value for the step multiplier of +the collector (see §2.10). +The function returns the previous value of the step multiplier. +
  • + +
+ + + + +
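
+A minimal usage sketch (assuming <stdio.h>): + 

+     int kb = lua_gc(L, LUA_GCCOUNT, 0);     /* memory in use, in Kbytes   */
+     int bytes = lua_gc(L, LUA_GCCOUNTB, 0); /* plus the remainder, bytes  */
+     printf("in use: %d KB + %d bytes\n", kb, bytes);
+     lua_gc(L, LUA_GCCOLLECT, 0);            /* full garbage-collection cycle */
+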

lua_getallocf

+[-0, +0, -] +

lua_Alloc lua_getallocf (lua_State *L, void **ud);
+ +

+Returns the memory-allocation function of a given state. +If ud is not NULL, Lua stores in *ud the +opaque pointer passed to lua_newstate. + + + + + +


lua_getfenv

+[-0, +1, -] +

void lua_getfenv (lua_State *L, int index);
+ +

+Pushes onto the stack the environment table of +the value at the given index. + + + + + +


lua_getfield

+[-0, +1, e] +

void lua_getfield (lua_State *L, int index, const char *k);
+ +

+Pushes onto the stack the value t[k], +where t is the value at the given valid index. +As in Lua, this function may trigger a metamethod +for the "index" event (see §2.8). + + + + + +


lua_getglobal

+[-0, +1, e] +

void lua_getglobal (lua_State *L, const char *name);
+ +

+Pushes onto the stack the value of the global name. +It is defined as a macro: + +

+     #define lua_getglobal(L,s)  lua_getfield(L, LUA_GLOBALSINDEX, s)
+
+ + + + +

lua_getmetatable

+[-0, +(0|1), -] +

int lua_getmetatable (lua_State *L, int index);
+ +

+Pushes onto the stack the metatable of the value at the given +acceptable index. +If the index is not valid, +or if the value does not have a metatable, +the function returns 0 and pushes nothing on the stack. + + + + + +


lua_gettable

+[-1, +1, e] +

void lua_gettable (lua_State *L, int index);
+ +

+Pushes onto the stack the value t[k], +where t is the value at the given valid index +and k is the value at the top of the stack. + + +

+This function pops the key from the stack +(putting the resulting value in its place). +As in Lua, this function may trigger a metamethod +for the "index" event (see §2.8). + + + + + +
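
+A minimal sketch, assuming a hypothetical global table t with a numeric field x: + 

+     lua_getglobal(L, "t");         /* push the table t         */
+     lua_pushstring(L, "x");        /* push the key             */
+     lua_gettable(L, -2);           /* pops "x", pushes t["x"]  */
+     lua_Number x = lua_tonumber(L, -1);
+     lua_pop(L, 2);                 /* remove the value and t   */
+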


lua_gettop

+[-0, +0, -] +

int lua_gettop (lua_State *L);
+ +

+Returns the index of the top element in the stack. +Because indices start at 1, +this result is equal to the number of elements in the stack +(and so 0 means an empty stack). + + + + + +


lua_insert

+[-1, +1, -] +

void lua_insert (lua_State *L, int index);
+ +

+Moves the top element into the given valid index, +shifting up the elements above this index to open space. +Cannot be called with a pseudo-index, +because a pseudo-index is not an actual stack position. + + + + + +


lua_Integer

+
typedef ptrdiff_t lua_Integer;
+ +

+The type used by the Lua API to represent integral values. + + +

+By default it is a ptrdiff_t, +which is usually the largest signed integral type the machine handles +"comfortably". + + + + + +


lua_isboolean

+[-0, +0, -] +

int lua_isboolean (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index has type boolean, +and 0 otherwise. + + + + + +


lua_iscfunction

+[-0, +0, -] +

int lua_iscfunction (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a C function, +and 0 otherwise. + + + + + +


lua_isfunction

+[-0, +0, -] +

int lua_isfunction (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a function +(either C or Lua), and 0 otherwise. + + + + + +


lua_islightuserdata

+[-0, +0, -] +

int lua_islightuserdata (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a light userdata, +and 0 otherwise. + + + + + +


lua_isnil

+[-0, +0, -] +

int lua_isnil (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is nil, +and 0 otherwise. + + + + + +


lua_isnone

+[-0, +0, -] +

int lua_isnone (lua_State *L, int index);
+ +

+Returns 1 if the given acceptable index is not valid +(that is, it refers to an element outside the current stack), +and 0 otherwise. + + + + + +


lua_isnoneornil

+[-0, +0, -] +

int lua_isnoneornil (lua_State *L, int index);
+ +

+Returns 1 if the given acceptable index is not valid +(that is, it refers to an element outside the current stack) +or if the value at this index is nil, +and 0 otherwise. + + + + + +


lua_isnumber

+[-0, +0, -] +

int lua_isnumber (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a number +or a string convertible to a number, +and 0 otherwise. + + + + + +


lua_isstring

+[-0, +0, -] +

int lua_isstring (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a string +or a number (which is always convertible to a string), +and 0 otherwise. + + + + + +


lua_istable

+[-0, +0, -] +

int lua_istable (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a table, +and 0 otherwise. + + + + + +


lua_isthread

+[-0, +0, -] +

int lua_isthread (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a thread, +and 0 otherwise. + + + + + +


lua_isuserdata

+[-0, +0, -] +

int lua_isuserdata (lua_State *L, int index);
+ +

+Returns 1 if the value at the given acceptable index is a userdata +(either full or light), and 0 otherwise. + + + + + +


lua_lessthan

+[-0, +0, e] +

int lua_lessthan (lua_State *L, int index1, int index2);
+ +

+Returns 1 if the value at acceptable index index1 is smaller +than the value at acceptable index index2, +following the semantics of the Lua < operator +(that is, may call metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices is not valid. + + + + +


lua_load

+[-0, +1, -] +

int lua_load (lua_State *L,
+              lua_Reader reader,
+              void *data,
+              const char *chunkname);
+ +

+Loads a Lua chunk. +If there are no errors, +lua_load pushes the compiled chunk as a Lua +function on top of the stack. +Otherwise, it pushes an error message. +The return values of lua_load are: + +

    + +
  • 0: no errors;
  • + +
  • LUA_ERRSYNTAX: +syntax error during pre-compilation;
  • + +
  • LUA_ERRMEM: +memory allocation error.
  • + +
+ +

+This function only loads a chunk; +it does not run it. + + +

+lua_load automatically detects whether the chunk is text or binary, +and loads it accordingly (see program luac). + + +

+The lua_load function uses a user-supplied reader function +to read the chunk (see lua_Reader). +The data argument is an opaque value passed to the reader function. + + +

+The chunkname argument gives a name to the chunk, +which is used for error messages and in debug information (see §3.8). + + + + + +


lua_newstate

+[-0, +0, -] +

lua_State *lua_newstate (lua_Alloc f, void *ud);
+ +

+Creates a new, independent state. +Returns NULL if cannot create the state +(due to lack of memory). +The argument f is the allocator function; +Lua does all memory allocation for this state through this function. +The second argument, ud, is an opaque pointer that Lua +simply passes to the allocator in every call. + + + + + +


lua_newtable

+[-0, +1, m] +

void lua_newtable (lua_State *L);
+ +

+Creates a new empty table and pushes it onto the stack. +It is equivalent to lua_createtable(L, 0, 0). + + + + + +


lua_newthread

+[-0, +1, m] +

lua_State *lua_newthread (lua_State *L);
+ +

+Creates a new thread, pushes it on the stack, +and returns a pointer to a lua_State that represents this new thread. +The new state returned by this function shares with the original state +all global objects (such as tables), +but has an independent execution stack. + + +

+There is no explicit function to close or to destroy a thread. +Threads are subject to garbage collection, +like any Lua object. + + + + + +


lua_newuserdata

+[-0, +1, m] +

void *lua_newuserdata (lua_State *L, size_t size);
+ +

+This function allocates a new block of memory with the given size, +pushes onto the stack a new full userdata with the block address, +and returns this address. + + +

+Userdata represent C values in Lua. +A full userdata represents a block of memory. +It is an object (like a table): +you must create it, it can have its own metatable, +and you can detect when it is being collected. +A full userdata is only equal to itself (under raw equality). + + +

+When Lua collects a full userdata with a gc metamethod, +Lua calls the metamethod and marks the userdata as finalized. +When this userdata is collected again then +Lua frees its corresponding memory. + + + + + +
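
+A minimal sketch wrapping a hypothetical C structure in a full userdata (the names Point and new_point are not part of the API): + 

+     typedef struct Point { double x, y; } Point;
+
+     static int new_point (lua_State *L) {
+       Point *p = (Point *)lua_newuserdata(L, sizeof(Point));
+       p->x = lua_tonumber(L, 1);
+       p->y = lua_tonumber(L, 2);
+       return 1;              /* the userdata is already on the stack */
+     }
+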


lua_next

+[-1, +(2|0), e] +

int lua_next (lua_State *L, int index);
+ +

+Pops a key from the stack, +and pushes a key-value pair from the table at the given index +(the "next" pair after the given key). +If there are no more elements in the table, +then lua_next returns 0 (and pushes nothing). + + +

+A typical traversal looks like this: + +

+     /* table is in the stack at index 't' */
+     lua_pushnil(L);  /* first key */
+     while (lua_next(L, t) != 0) {
+       /* uses 'key' (at index -2) and 'value' (at index -1) */
+       printf("%s - %s\n",
+              lua_typename(L, lua_type(L, -2)),
+              lua_typename(L, lua_type(L, -1)));
+       /* removes 'value'; keeps 'key' for next iteration */
+       lua_pop(L, 1);
+     }
+
+ +

+While traversing a table, +do not call lua_tolstring directly on a key, +unless you know that the key is actually a string. +Recall that lua_tolstring changes +the value at the given index; +this confuses the next call to lua_next. + + + + + +


lua_Number

+
typedef double lua_Number;
+ +

+The type of numbers in Lua. +By default, it is double, but that can be changed in luaconf.h. + + +

+Through the configuration file you can change +Lua to operate with another type for numbers (e.g., float or long). + + + + + +


lua_objlen

+[-0, +0, -] +

size_t lua_objlen (lua_State *L, int index);
+ +

+Returns the "length" of the value at the given acceptable index: +for strings, this is the string length; +for tables, this is the result of the length operator ('#'); +for userdata, this is the size of the block of memory allocated +for the userdata; +for other values, it is 0. + + + + + +


lua_pcall

+[-(nargs + 1), +(nresults|1), -] +

int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc);
+ +

+Calls a function in protected mode. + + +

+Both nargs and nresults have the same meaning as +in lua_call. +If there are no errors during the call, +lua_pcall behaves exactly like lua_call. +However, if there is any error, +lua_pcall catches it, +pushes a single value on the stack (the error message), +and returns an error code. +Like lua_call, +lua_pcall always removes the function +and its arguments from the stack. + + +

+If errfunc is 0, +then the error message returned on the stack +is exactly the original error message. +Otherwise, errfunc is the stack index of an +error handler function. +(In the current implementation, this index cannot be a pseudo-index.) +In case of runtime errors, +this function will be called with the error message +and its return value will be the message returned on the stack by lua_pcall. + + +

+Typically, the error handler function is used to add more debug +information to the error message, such as a stack traceback. +Such information cannot be gathered after the return of lua_pcall, +since by then the stack has unwound. + + +

+The lua_pcall function returns 0 in case of success +or one of the following error codes +(defined in lua.h): + +

    + +
  • LUA_ERRRUN: +a runtime error. +
  • + +
  • LUA_ERRMEM: +memory allocation error. +For such errors, Lua does not call the error handler function. +
  • + +
  • LUA_ERRERR: +error while running the error handler function. +
  • + +
+ + + + +
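
+A minimal sketch calling a hypothetical global function f in protected mode (assumes <stdio.h>): + 

+     lua_getglobal(L, "f");                  /* function to be called */
+     lua_pushinteger(L, 42);                 /* its single argument   */
+     if (lua_pcall(L, 1, 1, 0) != 0) {
+       fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
+       lua_pop(L, 1);                        /* remove the error message */
+     }
+     else {
+       /* use the result at the top of the stack, then pop it */
+       lua_pop(L, 1);
+     }
+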

lua_pop

+[-n, +0, -] +

void lua_pop (lua_State *L, int n);
+ +

+Pops n elements from the stack. + + + + + +


lua_pushboolean

+[-0, +1, -] +

void lua_pushboolean (lua_State *L, int b);
+ +

+Pushes a boolean value with value b onto the stack. + + + + + +


lua_pushcclosure

+[-n, +1, m] +

void lua_pushcclosure (lua_State *L, lua_CFunction fn, int n);
+ +

+Pushes a new C closure onto the stack. + + +

+When a C function is created, +it is possible to associate some values with it, +thus creating a C closure (see §3.4); +these values are then accessible to the function whenever it is called. +To associate values with a C function, +first these values should be pushed onto the stack +(when there are multiple values, the first value is pushed first). +Then lua_pushcclosure +is called to create and push the C function onto the stack, +with the argument n telling how many values should be +associated with the function. +lua_pushcclosure also pops these values from the stack. + + +

+The maximum value for n is 255. + + + + + +


lua_pushcfunction

+[-0, +1, m] +

void lua_pushcfunction (lua_State *L, lua_CFunction f);
+ +

+Pushes a C function onto the stack. +This function receives a pointer to a C function +and pushes onto the stack a Lua value of type function that, +when called, invokes the corresponding C function. + + +

+Any function to be registered in Lua must +follow the correct protocol to receive its parameters +and return its results (see lua_CFunction). + + +

+lua_pushcfunction is defined as a macro: + +

+     #define lua_pushcfunction(L,f)  lua_pushcclosure(L,f,0)
+
+ + + + +

lua_pushfstring

+[-0, +1, m] +

const char *lua_pushfstring (lua_State *L, const char *fmt, ...);
+ +

+Pushes onto the stack a formatted string +and returns a pointer to this string. +It is similar to the C function sprintf, +but has some important differences: + +

    + +
  • +You do not have to allocate space for the result: +the result is a Lua string and Lua takes care of memory allocation +(and deallocation, through garbage collection). +
  • + +
  • +The conversion specifiers are quite restricted. +There are no flags, widths, or precisions. +The conversion specifiers can only be +'%%' (inserts a '%' in the string), +'%s' (inserts a zero-terminated string, with no size restrictions), +'%f' (inserts a lua_Number), +'%p' (inserts a pointer as a hexadecimal numeral), +'%d' (inserts an int), and +'%c' (inserts an int as a character). +
  • + +
+ + + + +

lua_pushinteger

+[-0, +1, -] +

void lua_pushinteger (lua_State *L, lua_Integer n);
+ +

+Pushes a number with value n onto the stack. + + + + + +


lua_pushlightuserdata

+[-0, +1, -] +

void lua_pushlightuserdata (lua_State *L, void *p);
+ +

+Pushes a light userdata onto the stack. + + +

+Userdata represent C values in Lua. +A light userdata represents a pointer. +It is a value (like a number): +you do not create it, it has no individual metatable, +and it is not collected (as it was never created). +A light userdata is equal to "any" +light userdata with the same C address. + + + + + +


lua_pushliteral

+[-0, +1, m] +

void lua_pushliteral (lua_State *L, const char *s);
+ +

+This macro is equivalent to lua_pushlstring, +but can be used only when s is a literal string. +In these cases, it automatically provides the string length. + + + + + +


lua_pushlstring

+[-0, +1, m] +

void lua_pushlstring (lua_State *L, const char *s, size_t len);
+ +

+Pushes the string pointed to by s with size len +onto the stack. +Lua makes (or reuses) an internal copy of the given string, +so the memory at s can be freed or reused immediately after +the function returns. +The string can contain embedded zeros. + + + + + +


lua_pushnil

+[-0, +1, -] +

void lua_pushnil (lua_State *L);
+ +

+Pushes a nil value onto the stack. + + + + + +


lua_pushnumber

+[-0, +1, -] +

void lua_pushnumber (lua_State *L, lua_Number n);
+ +

+Pushes a number with value n onto the stack. + + + + + +


lua_pushstring

+[-0, +1, m] +

void lua_pushstring (lua_State *L, const char *s);
+ +

+Pushes the zero-terminated string pointed to by s +onto the stack. +Lua makes (or reuses) an internal copy of the given string, +so the memory at s can be freed or reused immediately after +the function returns. +The string cannot contain embedded zeros; +it is assumed to end at the first zero. + + + + + +


lua_pushthread

+[-0, +1, -] +

int lua_pushthread (lua_State *L);
+ +

+Pushes the thread represented by L onto the stack. +Returns 1 if this thread is the main thread of its state. + + + + + +


lua_pushvalue

+[-0, +1, -] +

void lua_pushvalue (lua_State *L, int index);
+ +

+Pushes a copy of the element at the given valid index +onto the stack. + + + + + +


lua_pushvfstring

+[-0, +1, m] +

const char *lua_pushvfstring (lua_State *L,
+                              const char *fmt,
+                              va_list argp);
+ +

+Equivalent to lua_pushfstring, except that it receives a va_list +instead of a variable number of arguments. + + + + + +


lua_rawequal

+[-0, +0, -] +

int lua_rawequal (lua_State *L, int index1, int index2);
+ +

+Returns 1 if the two values in acceptable indices index1 and +index2 are primitively equal +(that is, without calling metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices is not valid. + + + + +


lua_rawget

+[-1, +1, -] +

void lua_rawget (lua_State *L, int index);
+ +

+Similar to lua_gettable, but does a raw access +(i.e., without metamethods). + + + + + +


lua_rawgeti

+[-0, +1, -] +

void lua_rawgeti (lua_State *L, int index, int n);
+ +

+Pushes onto the stack the value t[n], +where t is the value at the given valid index. +The access is raw; +that is, it does not invoke metamethods. + + + + + +


lua_rawset

+[-2, +0, m] +

void lua_rawset (lua_State *L, int index);
+ +

+Similar to lua_settable, but does a raw assignment +(i.e., without metamethods). + + + + + +


lua_rawseti

+[-1, +0, m] +

void lua_rawseti (lua_State *L, int index, int n);
+ +

+Does the equivalent of t[n] = v, +where t is the value at the given valid index +and v is the value at the top of the stack. + + +

+This function pops the value from the stack. +The assignment is raw; +that is, it does not invoke metamethods. + + + + + +


lua_Reader

+
typedef const char * (*lua_Reader) (lua_State *L,
+                                    void *data,
+                                    size_t *size);
+ +

+The reader function used by lua_load. +Every time it needs another piece of the chunk, +lua_load calls the reader, +passing along its data parameter. +The reader must return a pointer to a block of memory +with a new piece of the chunk +and set size to the block size. +The block must exist until the reader function is called again. +To signal the end of the chunk, +the reader must return NULL or set size to zero. +The reader function may return pieces of any size greater than zero. + + + + + +
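
+A minimal sketch of a reader that delivers a whole C string in a single piece (the type and names are hypothetical): + 

+     typedef struct StringSource { const char *s; size_t len; } StringSource;
+
+     static const char *string_reader (lua_State *L, void *data, size_t *size) {
+       StringSource *src = (StringSource *)data;
+       (void)L;                           /* unused */
+       if (src->len == 0) return NULL;    /* no more pieces: end of chunk */
+       *size = src->len;
+       src->len = 0;                      /* the next call reports the end */
+       return src->s;
+     }
+
+With such a reader, a call like lua_load(L, string_reader, &src, "=chunk") would compile the string held in src. + 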


lua_register

+[-0, +0, e] +

void lua_register (lua_State *L,
+                   const char *name,
+                   lua_CFunction f);
+ +

+Sets the C function f as the new value of global name. +It is defined as a macro: + +

+     #define lua_register(L,n,f) \
+            (lua_pushcfunction(L, f), lua_setglobal(L, n))
+
+ + + + +

lua_remove

+[-1, +0, -] +

void lua_remove (lua_State *L, int index);
+ +

+Removes the element at the given valid index, +shifting down the elements above this index to fill the gap. +Cannot be called with a pseudo-index, +because a pseudo-index is not an actual stack position. + + + + + +


lua_replace

+[-1, +0, -] +

void lua_replace (lua_State *L, int index);
+ +

+Moves the top element into the given position (and pops it), +without shifting any element +(therefore replacing the value at the given position). + + + + + +


lua_resume

+[-?, +?, -] +

int lua_resume (lua_State *L, int narg);
+ +

+Starts and resumes a coroutine in a given thread. + + +

+To start a coroutine, you first create a new thread +(see lua_newthread); +then you push onto its stack the main function plus any arguments; +then you call lua_resume, +with narg being the number of arguments. +This call returns when the coroutine suspends or finishes its execution. +When it returns, the stack contains all values passed to lua_yield, +or all values returned by the body function. +lua_resume returns +LUA_YIELD if the coroutine yields, +0 if the coroutine finishes its execution +without errors, +or an error code in case of errors (see lua_pcall). +In case of errors, +the stack is not unwound, +so you can use the debug API over it. +The error message is on the top of the stack. +To restart a coroutine, you put on its stack only the values to +be passed as results from yield, +and then call lua_resume. + + + + + +
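
+A minimal sketch, assuming a hypothetical global Lua function producer to be run as a coroutine (assumes <stdio.h>): + 

+     lua_State *co = lua_newthread(L);
+     lua_getglobal(co, "producer");      /* the coroutine body */
+     lua_pushinteger(co, 10);            /* one argument       */
+     int status = lua_resume(co, 1);
+     if (status == LUA_YIELD) {
+       /* the values passed to lua_yield are now on co's stack */
+     }
+     else if (status != 0) {
+       fprintf(stderr, "coroutine error: %s\n", lua_tostring(co, -1));
+     }
+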


lua_setallocf

+[-0, +0, -] +

void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+ +

+Changes the allocator function of a given state to f +with user data ud. + + + + + +


lua_setfenv

+[-1, +0, -] +

int lua_setfenv (lua_State *L, int index);
+ +

+Pops a table from the stack and sets it as +the new environment for the value at the given index. +If the value at the given index is +neither a function nor a thread nor a userdata, +lua_setfenv returns 0. +Otherwise it returns 1. + + + + + +


lua_setfield

+[-1, +0, e] +

void lua_setfield (lua_State *L, int index, const char *k);
+ +

+Does the equivalent to t[k] = v, +where t is the value at the given valid index +and v is the value at the top of the stack. + + +

+This function pops the value from the stack. +As in Lua, this function may trigger a metamethod +for the "newindex" event (see §2.8). + + + + + +


lua_setglobal

+[-1, +0, e] +

void lua_setglobal (lua_State *L, const char *name);
+ +

+Pops a value from the stack and +sets it as the new value of global name. +It is defined as a macro: + +

+     #define lua_setglobal(L,s)   lua_setfield(L, LUA_GLOBALSINDEX, s)
+
+ + + + +

lua_setmetatable

+[-1, +0, -] +

int lua_setmetatable (lua_State *L, int index);
+ +

+Pops a table from the stack and +sets it as the new metatable for the value at the given +acceptable index. + + + + + +


lua_settable

+[-2, +0, e] +

void lua_settable (lua_State *L, int index);
+ +

+Does the equivalent to t[k] = v, +where t is the value at the given valid index, +v is the value at the top of the stack, +and k is the value just below the top. + + +

+This function pops both the key and the value from the stack. +As in Lua, this function may trigger a metamethod +for the "newindex" event (see §2.8). + + + + + +


lua_settop

+[-?, +?, -] +

void lua_settop (lua_State *L, int index);
+ +

+Accepts any acceptable index, or 0, +and sets the stack top to this index. +If the new top is larger than the old one, +then the new elements are filled with nil. +If index is 0, then all stack elements are removed. + + + + + +


lua_State

+
typedef struct lua_State lua_State;
+ +

+Opaque structure that keeps the whole state of a Lua interpreter. +The Lua library is fully reentrant: +it has no global variables. +All information about a state is kept in this structure. + + +

+A pointer to this state must be passed as the first argument to +every function in the library, except to lua_newstate, +which creates a Lua state from scratch. + + + + + +


lua_status

+[-0, +0, -] +

int lua_status (lua_State *L);
+ +

+Returns the status of the thread L. + + +

+The status can be 0 for a normal thread, +an error code if the thread finished its execution with an error, +or LUA_YIELD if the thread is suspended. + + + + + +


lua_toboolean

+[-0, +0, -] +

int lua_toboolean (lua_State *L, int index);
+ +

+Converts the Lua value at the given acceptable index to a C boolean +value (0 or 1). +Like all tests in Lua, +lua_toboolean returns 1 for any Lua value +different from false and nil; +otherwise it returns 0. +It also returns 0 when called with a non-valid index. +(If you want to accept only actual boolean values, +use lua_isboolean to test the value's type.) + + + + + +


lua_tocfunction

+[-0, +0, -] +

lua_CFunction lua_tocfunction (lua_State *L, int index);
+ +

+Converts a value at the given acceptable index to a C function. +That value must be a C function; +otherwise, returns NULL. + + + + + +


lua_tointeger

+[-0, +0, -] +

lua_Integer lua_tointeger (lua_State *L, int index);
+ +

+Converts the Lua value at the given acceptable index +to the signed integral type lua_Integer. +The Lua value must be a number or a string convertible to a number +(see §2.2.1); +otherwise, lua_tointeger returns 0. + + +

+If the number is not an integer, +it is truncated in some non-specified way. + + + + + +


lua_tolstring

+[-0, +0, m] +

const char *lua_tolstring (lua_State *L, int index, size_t *len);
+ +

+Converts the Lua value at the given acceptable index to a C string. +If len is not NULL, +it also sets *len with the string length. +The Lua value must be a string or a number; +otherwise, the function returns NULL. +If the value is a number, +then lua_tolstring also +changes the actual value in the stack to a string. +(This change confuses lua_next +when lua_tolstring is applied to keys during a table traversal.) + + +

+lua_tolstring returns a fully aligned pointer +to a string inside the Lua state. +This string always has a zero ('\0') +after its last character (as in C), +but can contain other zeros in its body. +Because Lua has garbage collection, +there is no guarantee that the pointer returned by lua_tolstring +will be valid after the corresponding value is removed from the stack. + + + + + +


lua_tonumber

+[-0, +0, -] +

lua_Number lua_tonumber (lua_State *L, int index);
+ +

+Converts the Lua value at the given acceptable index +to the C type lua_Number (see lua_Number). +The Lua value must be a number or a string convertible to a number +(see §2.2.1); +otherwise, lua_tonumber returns 0. + + + + + +


lua_topointer

+[-0, +0, -] +

const void *lua_topointer (lua_State *L, int index);
+ +

+Converts the value at the given acceptable index to a generic +C pointer (void*). +The value can be a userdata, a table, a thread, or a function; +otherwise, lua_topointer returns NULL. +Different objects will give different pointers. +There is no way to convert the pointer back to its original value. + + +

+Typically this function is used only for debug information. + + + + + +


lua_tostring

+[-0, +0, m] +

const char *lua_tostring (lua_State *L, int index);
+ +

+Equivalent to lua_tolstring with len equal to NULL. + + + + + +


lua_tothread

+[-0, +0, -] +

lua_State *lua_tothread (lua_State *L, int index);
+ +

+Converts the value at the given acceptable index to a Lua thread +(represented as lua_State*). +This value must be a thread; +otherwise, the function returns NULL. + + + + + +


lua_touserdata

+[-0, +0, -] +

void *lua_touserdata (lua_State *L, int index);
+ +

+If the value at the given acceptable index is a full userdata, +returns its block address. +If the value is a light userdata, +returns its pointer. +Otherwise, returns NULL. + + + + + +


lua_type

+[-0, +0, -] +

int lua_type (lua_State *L, int index);
+ +

+Returns the type of the value in the given acceptable index, +or LUA_TNONE for a non-valid index +(that is, an index to an "empty" stack position). +The types returned by lua_type are coded by the following constants +defined in lua.h: +LUA_TNIL, +LUA_TNUMBER, +LUA_TBOOLEAN, +LUA_TSTRING, +LUA_TTABLE, +LUA_TFUNCTION, +LUA_TUSERDATA, +LUA_TTHREAD, +and +LUA_TLIGHTUSERDATA. + + + + + +


lua_typename

+[-0, +0, -] +

const char *lua_typename  (lua_State *L, int tp);
+ +

+Returns the name of the type encoded by the value tp, +which must be one of the values returned by lua_type. + + + + +


lua_Writer

+
typedef int (*lua_Writer) (lua_State *L,
+                           const void* p,
+                           size_t sz,
+                           void* ud);
+ +

+The type of the writer function used by lua_dump. +Every time it produces another piece of chunk, +lua_dump calls the writer, +passing along the buffer to be written (p), +its size (sz), +and the data parameter supplied to lua_dump. + + +

+The writer returns an error code: +0 means no errors; +any other value means an error and stops lua_dump from +calling the writer again. + + + + + +


lua_xmove

+[-?, +?, -] +

void lua_xmove (lua_State *from, lua_State *to, int n);
+ +

+Exchanges values between different threads of the same global state. + + +

+This function pops n values from the stack from, +and pushes them onto the stack to. + + + + + +


lua_yield

+[-?, +?, -] +

int lua_yield  (lua_State *L, int nresults);
+ +

+Yields a coroutine. + + +

+This function should only be called as the +return expression of a C function, as follows: + +

+     return lua_yield (L, nresults);
+

+When a C function calls lua_yield in that way, +the running coroutine suspends its execution, +and the call to lua_resume that started this coroutine returns. +The parameter nresults is the number of values from the stack +that are passed as results to lua_resume. + + + + + + + +

3.8 - The Debug Interface

+ +

+Lua has no built-in debugging facilities. +Instead, it offers a special interface +by means of functions and hooks. +This interface allows the construction of different +kinds of debuggers, profilers, and other tools +that need "inside information" from the interpreter. + + + +


lua_Debug

+
typedef struct lua_Debug {
+  int event;
+  const char *name;           /* (n) */
+  const char *namewhat;       /* (n) */
+  const char *what;           /* (S) */
+  const char *source;         /* (S) */
+  int currentline;            /* (l) */
+  int nups;                   /* (u) number of upvalues */
+  int linedefined;            /* (S) */
+  int lastlinedefined;        /* (S) */
+  char short_src[LUA_IDSIZE]; /* (S) */
+  /* private part */
+  other fields
+} lua_Debug;
+ +

+A structure used to carry different pieces of +information about an active function. +lua_getstack fills only the private part +of this structure, for later use. +To fill the other fields of lua_Debug with useful information, +call lua_getinfo. + + +

+The fields of lua_Debug have the following meaning: + +

    + +
  • source: +If the function was defined in a string, +then source is that string. +If the function was defined in a file, +then source starts with a '@' followed by the file name. +
  • + +
  • short_src: +a "printable" version of source, to be used in error messages. +
  • + +
  • linedefined: +the line number where the definition of the function starts. +
  • + +
  • lastlinedefined: +the line number where the definition of the function ends. +
  • + +
  • what: +the string "Lua" if the function is a Lua function, +"C" if it is a C function, +"main" if it is the main part of a chunk, +and "tail" if it was a function that did a tail call. +In the latter case, +Lua has no other information about the function. +
  • + +
  • currentline: +the current line where the given function is executing. +When no line information is available, +currentline is set to -1. +
  • + +
  • name: +a reasonable name for the given function. +Because functions in Lua are first-class values, +they do not have a fixed name: +some functions can be the value of multiple global variables, +while others can be stored only in a table field. +The lua_getinfo function checks how the function was +called to find a suitable name. +If it cannot find a name, +then name is set to NULL. +
  • + +
  • namewhat: +explains the name field. +The value of namewhat can be +"global", "local", "method", +"field", "upvalue", or "" (the empty string), +according to how the function was called. +(Lua uses the empty string when no other option seems to apply.) +
  • + +
  • nups: +the number of upvalues of the function. +
  • + +
+ + + + +

lua_gethook

+[-0, +0, -] +

lua_Hook lua_gethook (lua_State *L);
+ +

+Returns the current hook function. + + + + + +


lua_gethookcount

+[-0, +0, -] +

int lua_gethookcount (lua_State *L);
+ +

+Returns the current hook count. + + + + + +


lua_gethookmask

+[-0, +0, -] +

int lua_gethookmask (lua_State *L);
+ +

+Returns the current hook mask. + + + + + +


lua_getinfo

+[-(0|1), +(0|1|2), m] +

int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+ +

+Returns information about a specific function or function invocation. + + +

+To get information about a function invocation, +the parameter ar must be a valid activation record that was +filled by a previous call to lua_getstack or +given as argument to a hook (see lua_Hook). + + +

+To get information about a function, you push it onto the stack +and start the what string with the character '>'. +(In that case, +lua_getinfo pops the function from the top of the stack.) +For instance, to know in which line a function f was defined, +you can write the following code: + 

+     lua_Debug ar;
+     lua_getfield(L, LUA_GLOBALSINDEX, "f");  /* get global 'f' */
+     lua_getinfo(L, ">S", &ar);
+     printf("%d\n", ar.linedefined);
+
+ +

+Each character in the string what +selects some fields of the structure ar to be filled or +a value to be pushed on the stack: + +

    + +
  • 'n': fills in the field name and namewhat; +
  • + +
  • 'S': +fills in the fields source, short_src, +linedefined, lastlinedefined, and what; +
  • + +
  • 'l': fills in the field currentline; +
  • + +
  • 'u': fills in the field nups; +
  • + +
  • 'f': +pushes onto the stack the function that is +running at the given level; +
  • + +
  • 'L': +pushes onto the stack a table whose indices are the +numbers of the lines that are valid on the function. +(A valid line is a line with some associated code, +that is, a line where you can put a break point. +Non-valid lines include empty lines and comments.) +
  • + +
+ +

+This function returns 0 on error +(for instance, an invalid option in what). + + + + + +


lua_getlocal

+[-0, +(0|1), -] +

const char *lua_getlocal (lua_State *L, lua_Debug *ar, int n);
+ +

+Gets information about a local variable of a given activation record. +The parameter ar must be a valid activation record that was +filled by a previous call to lua_getstack or +given as argument to a hook (see lua_Hook). +The index n selects which local variable to inspect +(1 is the first parameter or active local variable, and so on, +until the last active local variable). +lua_getlocal pushes the variable's value onto the stack +and returns its name. + + +

+Variable names starting with '(' (open parentheses) +represent internal variables +(loop control variables, temporaries, and C function locals). + + +

+Returns NULL (and pushes nothing) +when the index is greater than +the number of active local variables. + + + + + +


lua_getstack

+[-0, +0, -] +

int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+ +

+Gets information about the interpreter runtime stack. + + +

+This function fills parts of a lua_Debug structure with +an identification of the activation record +of the function executing at a given level. +Level 0 is the current running function, +whereas level n+1 is the function that has called level n. +When there are no errors, lua_getstack returns 1; +when called with a level greater than the stack depth, +it returns 0. + + + + + +
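
+A minimal sketch of a stack walk built on lua_getstack and lua_getinfo (assumes <stdio.h>): + 

+     lua_Debug ar;
+     int level = 0;
+     while (lua_getstack(L, level, &ar)) {
+       lua_getinfo(L, "Sl", &ar);      /* fill source info and current line */
+       printf("level %d: %s:%d\n", level, ar.short_src, ar.currentline);
+       level++;
+     }
+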


lua_getupvalue

+[-0, +(0|1), -] +

const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+ +

+Gets information about a closure's upvalue. +(For Lua functions, +upvalues are the external local variables that the function uses, +and that are consequently included in its closure.) +lua_getupvalue gets the index n of an upvalue, +pushes the upvalue's value onto the stack, +and returns its name. +funcindex points to the closure in the stack. +(Upvalues have no particular order, +as they are active through the whole function. +So, they are numbered in an arbitrary order.) + + +

+Returns NULL (and pushes nothing) +when the index is greater than the number of upvalues. +For C functions, this function uses the empty string "" +as a name for all upvalues. + + + + + +


lua_Hook

+
typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+ +

+Type for debugging hook functions. + + +

+Whenever a hook is called, its ar argument has its field +event set to the specific event that triggered the hook. +Lua identifies these events with the following constants: +LUA_HOOKCALL, LUA_HOOKRET, +LUA_HOOKTAILRET, LUA_HOOKLINE, +and LUA_HOOKCOUNT. +Moreover, for line events, the field currentline is also set. +To get the value of any other field in ar, +the hook must call lua_getinfo. +For return events, event can be LUA_HOOKRET, +the normal value, or LUA_HOOKTAILRET. +In the latter case, Lua is simulating a return from +a function that did a tail call; +in this case, it is useless to call lua_getinfo. + + +

+While Lua is running a hook, it disables other calls to hooks. +Therefore, if a hook calls back Lua to execute a function or a chunk, +this execution occurs without any calls to hooks. + + + + + +


lua_sethook

+[-0, +0, -] +

int lua_sethook (lua_State *L, lua_Hook f, int mask, int count);
+ +

+Sets the debugging hook function. + + +

+Argument f is the hook function. +mask specifies on which events the hook will be called: +it is formed by a bitwise or of the constants +LUA_MASKCALL, +LUA_MASKRET, +LUA_MASKLINE, +and LUA_MASKCOUNT. +The count argument is only meaningful when the mask +includes LUA_MASKCOUNT. +For each event, the hook is called as explained below: + +

    + +
  • The call hook: is called when the interpreter calls a function. +The hook is called just after Lua enters the new function, +before the function gets its arguments. +
  • + +
  • The return hook: is called when the interpreter returns from a function. +The hook is called just before Lua leaves the function. +You have no access to the values to be returned by the function. +
  • + +
  • The line hook: is called when the interpreter is about to +start the execution of a new line of code, +or when it jumps back in the code (even to the same line). +(This event only happens while Lua is executing a Lua function.) +
  • + +
  • The count hook: is called after the interpreter executes every +count instructions. +(This event only happens while Lua is executing a Lua function.) +
  • + +
+ +

+A hook is disabled by setting mask to zero. + + + + + +
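
+A minimal sketch of a line hook that reports every executed line (the name line_hook is hypothetical; assumes <stdio.h>): + 

+     static void line_hook (lua_State *L, lua_Debug *ar) {
+       lua_getinfo(L, "S", ar);     /* currentline is already set for line events */
+       printf("%s:%d\n", ar->short_src, ar->currentline);
+     }
+
+     /* installation: call the hook on every line event */
+     lua_sethook(L, line_hook, LUA_MASKLINE, 0);
+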


lua_setlocal

+[-(0|1), +0, -] +

const char *lua_setlocal (lua_State *L, lua_Debug *ar, int n);
+ +

+Sets the value of a local variable of a given activation record. +Parameters ar and n are as in lua_getlocal +(see lua_getlocal). +lua_setlocal assigns the value at the top of the stack +to the variable and returns its name. +It also pops the value from the stack. + + +

+Returns NULL (and pops nothing) +when the index is greater than +the number of active local variables. + + + + + +


lua_setupvalue

+[-(0|1), +0, -] +

const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+ +

+Sets the value of a closure's upvalue. +It assigns the value at the top of the stack +to the upvalue and returns its name. +It also pops the value from the stack. +Parameters funcindex and n are as in the lua_getupvalue +(see lua_getupvalue). + + +

+Returns NULL (and pops nothing) +when the index is greater than the number of upvalues. + + + + + + + +

4 - The Auxiliary Library

+ +

+ +The auxiliary library provides several convenient functions +to interface C with Lua. +While the basic API provides the primitive functions for all +interactions between C and Lua, +the auxiliary library provides higher-level functions for some +common tasks. + + +

+All functions from the auxiliary library +are defined in header file lauxlib.h and +have a prefix luaL_. + + +

+All functions in the auxiliary library are built on +top of the basic API, +and so they provide nothing that cannot be done with this API. + + +

+Several functions in the auxiliary library are used to +check C function arguments. +Their names are always luaL_check* or luaL_opt*. +All of these functions throw an error if the check is not satisfied. +Because the error message is formatted for arguments +(e.g., "bad argument #1"), +you should not use these functions for other stack values. + + + +

4.1 - Functions and Types

+ +

+Here we list all functions and types from the auxiliary library +in alphabetical order. + + + +


luaL_addchar

+[-0, +0, m] +

void luaL_addchar (luaL_Buffer *B, char c);
+ +

+Adds the character c to the buffer B +(see luaL_Buffer). + + + + + +


luaL_addlstring

+[-0, +0, m] +

void luaL_addlstring (luaL_Buffer *B, const char *s, size_t l);
+ +

+Adds the string pointed to by s with length l to +the buffer B +(see luaL_Buffer). +The string may contain embedded zeros. + + + + + +


luaL_addsize

+[-0, +0, m] +

void luaL_addsize (luaL_Buffer *B, size_t n);
+ +

+Adds to the buffer B (see luaL_Buffer) +a string of length n previously copied to the +buffer area (see luaL_prepbuffer). + + + + + +


luaL_addstring

+[-0, +0, m] +

void luaL_addstring (luaL_Buffer *B, const char *s);
+ +

+Adds the zero-terminated string pointed to by s +to the buffer B +(see luaL_Buffer). +The string may not contain embedded zeros. + + + + + +


luaL_addvalue

+[-1, +0, m] +

void luaL_addvalue (luaL_Buffer *B);
+ +

+Adds the value at the top of the stack +to the buffer B +(see luaL_Buffer). +Pops the value. + + +

+This is the only function on string buffers that can (and must) +be called with an extra element on the stack, +which is the value to be added to the buffer. + + + + + +


luaL_argcheck

+[-0, +0, v] +

void luaL_argcheck (lua_State *L,
+                    int cond,
+                    int narg,
+                    const char *extramsg);
+ +

+Checks whether cond is true. +If not, raises an error with the following message, +where func is retrieved from the call stack: + +

+     bad argument #<narg> to <func> (<extramsg>)
+
+ + + + +

luaL_argerror

+[-0, +0, v] +

int luaL_argerror (lua_State *L, int narg, const char *extramsg);
+ +

+Raises an error with the following message, +where func is retrieved from the call stack: + +

+     bad argument #<narg> to <func> (<extramsg>)
+
+ +

+This function never returns, +but it is an idiom to use it in C functions +as return luaL_argerror(args). + + + + + +


luaL_Buffer

+
typedef struct luaL_Buffer luaL_Buffer;
+ +

+Type for a string buffer. + + +

+A string buffer allows C code to build Lua strings piecemeal. +Its pattern of use is as follows: + +

    + +
  • First you declare a variable b of type luaL_Buffer.
  • + +
  • Then you initialize it with a call luaL_buffinit(L, &b).
  • + +
  • +Then you add string pieces to the buffer calling any of +the luaL_add* functions. +
  • + +
  • +You finish by calling luaL_pushresult(&b). +This call leaves the final string on the top of the stack. +
  • + +
+ +

+During its normal operation, +a string buffer uses a variable number of stack slots. +So, while using a buffer, you cannot assume that you know where +the top of the stack is. +You can use the stack between successive calls to buffer operations +as long as that use is balanced; +that is, +when you call a buffer operation, +the stack is at the same level +it was immediately after the previous buffer operation. +(The only exception to this rule is luaL_addvalue.) +After calling luaL_pushresult the stack is back to its +level when the buffer was initialized, +plus the final string on its top. + + + + + +
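
+A minimal usage sketch that builds the arbitrary string "one,two,three" piecemeal and leaves it on the top of the stack: + 

+     luaL_Buffer b;
+     luaL_buffinit(L, &b);
+     luaL_addstring(&b, "one");
+     luaL_addchar(&b, ',');
+     luaL_addstring(&b, "two");
+     luaL_addchar(&b, ',');
+     luaL_addstring(&b, "three");
+     luaL_pushresult(&b);        /* the final string is now on the top */
+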


luaL_buffinit

+[-0, +0, -] +

void luaL_buffinit (lua_State *L, luaL_Buffer *B);
+ +

+Initializes a buffer B. +This function does not allocate any space; +the buffer must be declared as a variable +(see luaL_Buffer). + + + + + +


luaL_callmeta

+[-0, +(0|1), e] +

int luaL_callmeta (lua_State *L, int obj, const char *e);
+ +

+Calls a metamethod. + + +

+If the object at index obj has a metatable and this +metatable has a field e, +this function calls this field and passes the object as its only argument. +In this case this function returns 1 and pushes onto the +stack the value returned by the call. +If there is no metatable or no metamethod, +this function returns 0 (without pushing any value on the stack). + + + + + +


luaL_checkany

+[-0, +0, v] +

void luaL_checkany (lua_State *L, int narg);
+ +

+Checks whether the function has an argument +of any type (including nil) at position narg. + + + + + +


luaL_checkint

+[-0, +0, v] +

int luaL_checkint (lua_State *L, int narg);
+ +

+Checks whether the function argument narg is a number +and returns this number cast to an int. + + + + + +


luaL_checkinteger

+[-0, +0, v] +

lua_Integer luaL_checkinteger (lua_State *L, int narg);
+ +

+Checks whether the function argument narg is a number +and returns this number cast to a lua_Integer. + + + + + +


luaL_checklong

+[-0, +0, v] +

long luaL_checklong (lua_State *L, int narg);
+ +

+Checks whether the function argument narg is a number +and returns this number cast to a long. + + + + + +


luaL_checklstring

+[-0, +0, v] +

const char *luaL_checklstring (lua_State *L, int narg, size_t *l);
+ +

+Checks whether the function argument narg is a string +and returns this string; +if l is not NULL fills *l +with the string's length. + + +

+This function uses lua_tolstring to get its result, +so all conversions and caveats of that function apply here. + + + + + +


luaL_checknumber

+[-0, +0, v] +

lua_Number luaL_checknumber (lua_State *L, int narg);
+ +

+Checks whether the function argument narg is a number +and returns this number. + + + + + +


luaL_checkoption

+[-0, +0, v] +

int luaL_checkoption (lua_State *L,
+                      int narg,
+                      const char *def,
+                      const char *const lst[]);
+ +

+Checks whether the function argument narg is a string and +searches for this string in the array lst +(which must be NULL-terminated). +Returns the index in the array where the string was found. +Raises an error if the argument is not a string or +if the string cannot be found. + + +

+If def is not NULL, +the function uses def as a default value when +there is no argument narg or if this argument is nil. + + +

+This is a useful function for mapping strings to C enums. +(The usual convention in Lua libraries is +to use strings instead of numbers to select options.) + + + + + +


luaL_checkstack

+[-0, +0, v] +

void luaL_checkstack (lua_State *L, int sz, const char *msg);
+ +

+Grows the stack size to top + sz elements, +raising an error if the stack cannot grow to that size. +msg is additional text to go into the error message. + + + + + +


luaL_checkstring

+[-0, +0, v] +

const char *luaL_checkstring (lua_State *L, int narg);
+ +

+Checks whether the function argument narg is a string +and returns this string. + + +

+This function uses lua_tolstring to get its result, +so all conversions and caveats of that function apply here. + + + + + +


luaL_checktype

+[-0, +0, v] +

void luaL_checktype (lua_State *L, int narg, int t);
+ +

+Checks whether the function argument narg has type t. +See lua_type for the encoding of types for t. + + + + + +


luaL_checkudata

+[-0, +0, v] +

void *luaL_checkudata (lua_State *L, int narg, const char *tname);
+ +

+Checks whether the function argument narg is a userdata +of the type tname (see luaL_newmetatable). + + + + + +


luaL_dofile

+[-0, +?, m] +

int luaL_dofile (lua_State *L, const char *filename);
+ +

+Loads and runs the given file. +It is defined as the following macro: + +

+     (luaL_loadfile(L, filename) || lua_pcall(L, 0, LUA_MULTRET, 0))
+

+It returns 0 if there are no errors +or 1 in case of errors. + + + + + +


luaL_dostring

+[-0, +?, m] +

int luaL_dostring (lua_State *L, const char *str);
+ +

+Loads and runs the given string. +It is defined as the following macro: + +

+     (luaL_loadstring(L, str) || lua_pcall(L, 0, LUA_MULTRET, 0))
+

+It returns 0 if there are no errors +or 1 in case of errors. + + + + + +


luaL_error

+[-0, +0, v] +

int luaL_error (lua_State *L, const char *fmt, ...);
+ +

+Raises an error. +The error message format is given by fmt +plus any extra arguments, +following the same rules of lua_pushfstring. +It also adds at the beginning of the message the file name and +the line number where the error occurred, +if this information is available. + + +

+This function never returns, +but it is an idiom to use it in C functions +as return luaL_error(args). + + + + + +


luaL_getmetafield

+[-0, +(0|1), m] +

int luaL_getmetafield (lua_State *L, int obj, const char *e);
+ +

+Pushes onto the stack the field e from the metatable +of the object at index obj. +If the object does not have a metatable, +or if the metatable does not have this field, +returns 0 and pushes nothing. + + + + + +


luaL_getmetatable

+[-0, +1, -] +

void luaL_getmetatable (lua_State *L, const char *tname);
+ +

+Pushes onto the stack the metatable associated with name tname +in the registry (see luaL_newmetatable). + + + + + +


luaL_gsub

+[-0, +1, m] +

const char *luaL_gsub (lua_State *L,
+                       const char *s,
+                       const char *p,
+                       const char *r);
+ +

+Creates a copy of string s by replacing +any occurrence of the string p +with the string r. +Pushes the resulting string on the stack and returns it. + + + + + +


luaL_loadbuffer

+[-0, +1, m] +

int luaL_loadbuffer (lua_State *L,
+                     const char *buff,
+                     size_t sz,
+                     const char *name);
+ +

+Loads a buffer as a Lua chunk. +This function uses lua_load to load the chunk in the +buffer pointed to by buff with size sz. + + +

+This function returns the same results as lua_load. +name is the chunk name, +used for debug information and error messages. + + + + + +


luaL_loadfile

+[-0, +1, m] +

int luaL_loadfile (lua_State *L, const char *filename);
+ +

+Loads a file as a Lua chunk. +This function uses lua_load to load the chunk in the file +named filename. +If filename is NULL, +then it loads from the standard input. +The first line in the file is ignored if it starts with a #. + + +

+This function returns the same results as lua_load, +but it has an extra error code LUA_ERRFILE +if it cannot open/read the file. + + +

+As lua_load, this function only loads the chunk; +it does not run it. + + + + + +


luaL_loadstring

+[-0, +1, m] +

int luaL_loadstring (lua_State *L, const char *s);
+ +

+Loads a string as a Lua chunk. +This function uses lua_load to load the chunk in +the zero-terminated string s. + + +

+This function returns the same results as lua_load. + + +

+Also as lua_load, this function only loads the chunk; +it does not run it. + + + + + +


luaL_newmetatable

+[-0, +1, m] +

int luaL_newmetatable (lua_State *L, const char *tname);
+ +

+If the registry already has the key tname, +returns 0. +Otherwise, +creates a new table to be used as a metatable for userdata, +adds it to the registry with key tname, +and returns 1. + + +

+In both cases pushes onto the stack the final value associated +with tname in the registry. + + + + + +


luaL_newstate

+[-0, +0, -] +

lua_State *luaL_newstate (void);
+ +

+Creates a new Lua state. +It calls lua_newstate with an +allocator based on the standard C realloc function +and then sets a panic function (see lua_atpanic) that prints +an error message to the standard error output in case of fatal +errors. + + +

+Returns the new state, +or NULL if there is a memory allocation error. + + + + + +


luaL_openlibs

+[-0, +0, m] +

void luaL_openlibs (lua_State *L);
+ +

+Opens all standard Lua libraries into the given state. + + + + + +


luaL_optint

+[-0, +0, v] +

int luaL_optint (lua_State *L, int narg, int d);
+ +

+If the function argument narg is a number, +returns this number cast to an int. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + + + + +


luaL_optinteger

+[-0, +0, v] +

lua_Integer luaL_optinteger (lua_State *L,
+                             int narg,
+                             lua_Integer d);
+ +

+If the function argument narg is a number, +returns this number cast to a lua_Integer. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + + + + +


luaL_optlong

+[-0, +0, v] +

long luaL_optlong (lua_State *L, int narg, long d);
+ +

+If the function argument narg is a number, +returns this number cast to a long. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + + + + +


luaL_optlstring

+[-0, +0, v] +

const char *luaL_optlstring (lua_State *L,
+                             int narg,
+                             const char *d,
+                             size_t *l);
+ +

+If the function argument narg is a string, +returns this string. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + +

+If l is not NULL, +fills the position *l with the result's length. + + + + + +


luaL_optnumber

+[-0, +0, v] +

lua_Number luaL_optnumber (lua_State *L, int narg, lua_Number d);
+ +

+If the function argument narg is a number, +returns this number. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + + + + +


luaL_optstring

+[-0, +0, v] +

const char *luaL_optstring (lua_State *L,
+                            int narg,
+                            const char *d);
+ +

+If the function argument narg is a string, +returns this string. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + + + + + +


luaL_prepbuffer

+[-0, +0, -] +

char *luaL_prepbuffer (luaL_Buffer *B);
+ +

+Returns an address to a space of size LUAL_BUFFERSIZE +where you can copy a string to be added to buffer B +(see luaL_Buffer). +After copying the string into this space you must call +luaL_addsize with the size of the string to actually add +it to the buffer. + + + + + +


luaL_pushresult

+[-?, +1, m] +

void luaL_pushresult (luaL_Buffer *B);
+ +

+Finishes the use of buffer B leaving the final string on +the top of the stack. + + + + + +


luaL_ref

+[-1, +0, m] +

int luaL_ref (lua_State *L, int t);
+ +

+Creates and returns a reference, +in the table at index t, +for the object at the top of the stack (and pops the object). + + +

+A reference is a unique integer key. +As long as you do not manually add integer keys into table t, +luaL_ref ensures the uniqueness of the key it returns. +You can retrieve an object referred by reference r +by calling lua_rawgeti(L, t, r). +Function luaL_unref frees a reference and its associated object. + + +

+If the object at the top of the stack is nil, +luaL_ref returns the constant LUA_REFNIL. +The constant LUA_NOREF is guaranteed to be different +from any reference returned by luaL_ref. + + + + + +


luaL_Reg

+
typedef struct luaL_Reg {
+  const char *name;
+  lua_CFunction func;
+} luaL_Reg;
+ +

+Type for arrays of functions to be registered by +luaL_register. +name is the function name and func is a pointer to +the function. +Any array of luaL_Reg must end with a sentinel entry +in which both name and func are NULL. + + + + + +


luaL_register

+[-(0|1), +1, m] +

void luaL_register (lua_State *L,
+                    const char *libname,
+                    const luaL_Reg *l);
+ +

+Opens a library. + + +

+When called with libname equal to NULL, +it simply registers all functions in the list l +(see luaL_Reg) into the table on the top of the stack. + + +

+When called with a non-null libname, +luaL_register creates a new table t, +sets it as the value of the global variable libname, +sets it as the value of package.loaded[libname], +and registers on it all functions in the list l. +If there is a table in package.loaded[libname] or in +variable libname, +reuses this table instead of creating a new one. + + +

+In any case the function leaves the table +on the top of the stack. + + + + + +


luaL_typename

+[-0, +0, -] +

const char *luaL_typename (lua_State *L, int index);
+ +

+Returns the name of the type of the value at the given index. + + + + + +


luaL_typerror

+[-0, +0, v] +

int luaL_typerror (lua_State *L, int narg, const char *tname);
+ +

+Generates an error with a message like the following: + +

+     location: bad argument narg to 'func' (tname expected, got rt)
+

+where location is produced by luaL_where, +func is the name of the current function, +and rt is the type name of the actual argument. + + + + + +


luaL_unref

+[-0, +0, -] +

void luaL_unref (lua_State *L, int t, int ref);
+ +

+Releases reference ref from the table at index t +(see luaL_ref). +The entry is removed from the table, +so that the referred object can be collected. +The reference ref is also freed to be used again. + + +

+If ref is LUA_NOREF or LUA_REFNIL, +luaL_unref does nothing. + + + + + +


luaL_where

+[-0, +1, m] +

void luaL_where (lua_State *L, int lvl);
+ +

+Pushes onto the stack a string identifying the current position +of the control at level lvl in the call stack. +Typically this string has the following format: + +

+     chunkname:currentline:
+

+Level 0 is the running function, +level 1 is the function that called the running function, +etc. + + +

+This function is used to build a prefix for error messages. + + + + + + + +

5 - Standard Libraries

+ +

+The standard Lua libraries provide useful functions +that are implemented directly through the C API. +Some of these functions provide essential services to the language +(e.g., type and getmetatable); +others provide access to "outside" services (e.g., I/O); +and others could be implemented in Lua itself, +but are quite useful or have critical performance requirements that +deserve an implementation in C (e.g., table.sort). + + +

+All libraries are implemented through the official C API +and are provided as separate C modules. +Currently, Lua has the following standard libraries: + +

    + +
  • basic library, which includes the coroutine sub-library;
  • + +
  • package library;
  • + +
  • string manipulation;
  • + +
  • table manipulation;
  • + +
  • mathematical functions (sin, log, etc.);
  • + +
  • input and output;
  • + +
  • operating system facilities;
  • + +
  • debug facilities.
  • + +

+Except for the basic and package libraries, +each library provides all its functions as fields of a global table +or as methods of its objects. + + +

+To have access to these libraries, +the C host program should call the luaL_openlibs function, +which opens all standard libraries. +Alternatively, +it can open them individually by calling +luaopen_base (for the basic library), +luaopen_package (for the package library), +luaopen_string (for the string library), +luaopen_table (for the table library), +luaopen_math (for the mathematical library), +luaopen_io (for the I/O library), +luaopen_os (for the Operating System library), +and luaopen_debug (for the debug library). +These functions are declared in lualib.h +and should not be called directly: +you must call them like any other Lua C function, +e.g., by using lua_call. + + + +

5.1 - Basic Functions

+ +

+The basic library provides some core functions to Lua. +If you do not include this library in your application, +you should check carefully whether you need to provide +implementations for some of its facilities. + + +

+


assert (v [, message])

+Issues an error when +the value of its argument v is false (i.e., nil or false); +otherwise, returns all its arguments. +message is an error message; +when absent, it defaults to "assertion failed!" + + + + +

+


collectgarbage ([opt [, arg]])

+ + +

+This function is a generic interface to the garbage collector. +It performs different functions according to its first argument, opt: + +

    + +
  • "collect": +performs a full garbage-collection cycle. +This is the default option. +
  • + +
  • "stop": +stops the garbage collector. +
  • + +
  • "restart": +restarts the garbage collector. +
  • + +
  • "count": +returns the total memory in use by Lua (in Kbytes). +
  • + +
  • "step": +performs a garbage-collection step. +The step "size" is controlled by arg +(larger values mean more steps) in a non-specified way. +If you want to control the step size +you must experimentally tune the value of arg. +Returns true if the step finished a collection cycle. +
  • + +
  • "setpause": +sets arg as the new value for the pause of +the collector (see §2.10). +Returns the previous value for pause. +
  • + +
  • "setstepmul": +sets arg as the new value for the step multiplier of +the collector (see §2.10). +Returns the previous value for step. +
  • + +
+ + + +

+


dofile ([filename])

+Opens the named file and executes its contents as a Lua chunk. +When called without arguments, +dofile executes the contents of the standard input (stdin). +Returns all values returned by the chunk. +In case of errors, dofile propagates the error +to its caller (that is, dofile does not run in protected mode). + + + + +

+


error (message [, level])

+Terminates the last protected function called +and returns message as the error message. +Function error never returns. + + +

+Usually, error adds some information about the error position +at the beginning of the message. +The level argument specifies how to get the error position. +With level 1 (the default), the error position is where the +error function was called. +Level 2 points the error to where the function +that called error was called; and so on. +Passing a level 0 avoids the addition of error position information +to the message. + + + + +
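+As an illustration, the following sketch (names are arbitrary) shows how the level argument shifts the reported position; the exact location prefix depends on where the chunk was loaded:

+     local function check(n)
+       if type(n) ~= "number" then
+         error("number expected", 2)   -- level 2 blames the caller of check
+       end
+       return n * 2
+     end
+     
+     print(pcall(check, "x"))   --> false   <position of the call>: number expected
+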

+


_G

+A global variable (not a function) that +holds the global environment (that is, _G._G = _G). +Lua itself does not use this variable; +changing its value does not affect any environment, +nor vice-versa. +(Use setfenv to change environments.) + + + + +

+


getfenv ([f])

+Returns the current environment in use by the function. +f can be a Lua function or a number +that specifies the function at that stack level: +Level 1 is the function calling getfenv. +If the given function is not a Lua function, +or if f is 0, +getfenv returns the global environment. +The default for f is 1. + + + + +

+


getmetatable (object)

+ + +

+If object does not have a metatable, returns nil. +Otherwise, +if the object's metatable has a "__metatable" field, +returns the associated value. +Otherwise, returns the metatable of the given object. + + + + +

+


ipairs (t)

+ + +

+Returns three values: an iterator function, the table t, and 0, +so that the construction + +

+     for i,v in ipairs(t) do body end
+

+will iterate over the pairs (1,t[1]), (2,t[2]), ···, +up to the first integer key absent from the table. + + + + +

+


load (func [, chunkname])

+ + +

+Loads a chunk using function func to get its pieces. +Each call to func must return a string that concatenates +with previous results. +A return of an empty string, nil, or no value signals the end of the chunk. + + +

+If there are no errors, +returns the compiled chunk as a function; +otherwise, returns nil plus the error message. +The environment of the returned function is the global environment. + + +

+chunkname is used as the chunk name for error messages +and debug information. +When absent, +it defaults to "=(load)". + + + + +
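+As an illustration, the following sketch (all names are arbitrary) builds a chunk from several pieces; returning nil ends the chunk:

+     local pieces = { "return ", "1 + ", "2" }
+     local i = 0
+     local chunk = load(function ()
+       i = i + 1
+       return pieces[i]        -- nil after the last piece signals the end
+     end, "=(example)")
+     print(chunk())            --> 3
+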

+


loadfile ([filename])

+ + +

+Similar to load, +but gets the chunk from file filename +or from the standard input, +if no file name is given. + + + + +

+


loadstring (string [, chunkname])

+ + +

+Similar to load, +but gets the chunk from the given string. + + +

+To load and run a given string, use the idiom + +

+     assert(loadstring(s))()
+
+ +

+When absent, +chunkname defaults to the given string. + + + + +

+


next (table [, index])

+ + +

+Allows a program to traverse all fields of a table. +Its first argument is a table and its second argument +is an index in this table. +next returns the next index of the table +and its associated value. +When called with nil as its second argument, +next returns an initial index +and its associated value. +When called with the last index, +or with nil in an empty table, +next returns nil. +If the second argument is absent, then it is interpreted as nil. +In particular, +you can use next(t) to check whether a table is empty. + + +

+The order in which the indices are enumerated is not specified, +even for numeric indices. +(To traverse a table in numeric order, +use a numerical for or the ipairs function.) + + +

+The behavior of next is undefined if, +during the traversal, +you assign any value to a non-existent field in the table. +You may however modify existing fields. +In particular, you may clear existing fields. + + + + +
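+As an illustration (the table contents are arbitrary), the following sketch tests for emptiness and traverses a table with next:

+     local t = { x = 1, y = 2 }
+     print(next(t) ~= nil)     --> true (the table is not empty)
+     
+     local k, v = next(t)      -- first pair, in no particular order
+     while k do
+       print(k, v)
+       k, v = next(t, k)
+     end
+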

+


pairs (t)

+ + +

+Returns three values: the next function, the table t, and nil, +so that the construction + +

+     for k,v in pairs(t) do body end
+

+will iterate over all key–value pairs of table t. + + +

+See function next for the caveats of modifying +the table during its traversal. + + + + +

+


pcall (f, arg1, ···)

+ + +

+Calls function f with +the given arguments in protected mode. +This means that any error inside f is not propagated; +instead, pcall catches the error +and returns a status code. +Its first result is the status code (a boolean), +which is true if the call succeeds without errors. +In such case, pcall also returns all results from the call, +after this first result. +In case of any error, pcall returns false plus the error message. + + + + +
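+As an illustration, a minimal sketch of both outcomes (the called functions are arbitrary):

+     local ok, value = pcall(function () return 10 / 2 end)
+     print(ok, value)     --> true    5
+     
+     local ok, err = pcall(function () error("boom", 0) end)
+     print(ok, err)       --> false   boom
+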

+


print (···)

+Receives any number of arguments, +and prints their values to stdout, +using the tostring function to convert them to strings. +print is not intended for formatted output, +but only as a quick way to show a value, +typically for debugging. +For formatted output, use string.format. + + + + +

+


rawequal (v1, v2)

+Checks whether v1 is equal to v2, +without invoking any metamethod. +Returns a boolean. + + + + +

+


rawget (table, index)

+Gets the real value of table[index], +without invoking any metamethod. +table must be a table; +index may be any value. + + + + +

+


rawset (table, index, value)

+Sets the real value of table[index] to value, +without invoking any metamethod. +table must be a table, +index any value different from nil, +and value any Lua value. + + +

+This function returns table. + + + + +

+


select (index, ···)

+ + +

+If index is a number, +returns all arguments after argument number index. +Otherwise, index must be the string "#", +and select returns the total number of extra arguments it received. + + + + +
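+As an illustration (the argument values are arbitrary):

+     print(select("#", 1, nil, 3))        --> 3   (counts the arguments after "#")
+     print(select(2, "a", "b", "c"))      --> b    c
+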

+


setfenv (f, table)

+ + +

+Sets the environment to be used by the given function. +f can be a Lua function or a number +that specifies the function at that stack level: +Level 1 is the function calling setfenv. +setfenv returns the given function. + + +

+As a special case, when f is 0 setfenv changes +the environment of the running thread. +In this case, setfenv returns no values. + + + + +

+


setmetatable (table, metatable)

+ + +

+Sets the metatable for the given table. +(You cannot change the metatable of other types from Lua, only from C.) +If metatable is nil, +removes the metatable of the given table. +If the original metatable has a "__metatable" field, +raises an error. + + +

+This function returns table. + + + + +
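+As an illustration, the following sketch (field names are arbitrary) combines setmetatable with an __index metamethod:

+     local defaults = { color = "red", size = 10 }
+     local t = setmetatable({}, { __index = defaults })
+     print(t.color)              --> red   (found through the metatable)
+     print(rawget(t, "color"))   --> nil   (not stored in t itself)
+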

+


tonumber (e [, base])

+Tries to convert its argument to a number. +If the argument is already a number or a string convertible +to a number, then tonumber returns this number; +otherwise, it returns nil. + + +

+An optional argument specifies the base to interpret the numeral. +The base may be any integer between 2 and 36, inclusive. +In bases above 10, the letter 'A' (in either upper or lower case) +represents 10, 'B' represents 11, and so forth, +with 'Z' representing 35. +In base 10 (the default), the number can have a decimal part, +as well as an optional exponent part (see §2.1). +In other bases, only unsigned integers are accepted. + + + + +
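+As an illustration:

+     print(tonumber("10"))        --> 10
+     print(tonumber("ff", 16))    --> 255
+     print(tonumber("z", 36))     --> 35
+     print(tonumber("hello"))     --> nil
+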

+


tostring (e)

+Receives an argument of any type and +converts it to a string in a reasonable format. +For complete control of how numbers are converted, +use string.format. + + +

+If the metatable of e has a "__tostring" field, +then tostring calls the corresponding value +with e as argument, +and uses the result of the call as its result. + + + + +

+


type (v)

+Returns the type of its only argument, coded as a string. +The possible results of this function are +"nil" (a string, not the value nil), +"number", +"string", +"boolean", +"table", +"function", +"thread", +and "userdata". + + + + +

+


unpack (list [, i [, j]])

+Returns the elements from the given table. +This function is equivalent to + +
+     return list[i], list[i+1], ···, list[j]
+

+except that the above code can be written only for a fixed number +of elements. +By default, i is 1 and j is the length of the list, +as defined by the length operator (see §2.5.5). + + + + +

+


_VERSION

+A global variable (not a function) that +holds a string containing the current interpreter version. +The current contents of this variable is "Lua 5.1". + + + + +

+


xpcall (f, err)

+ + +

+This function is similar to pcall, +except that you can set a new error handler. + + +

+xpcall calls function f in protected mode, +using err as the error handler. +Any error inside f is not propagated; +instead, xpcall catches the error, +calls the err function with the original error object, +and returns a status code. +Its first result is the status code (a boolean), +which is true if the call succeeds without errors. +In this case, xpcall also returns all results from the call, +after this first result. +In case of any error, +xpcall returns false plus the result from err. + + + + + + + +
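+As an illustration, a minimal sketch with an arbitrary error handler:

+     local function handler(msg)
+       return "handled: " .. msg
+     end
+     
+     local ok, result = xpcall(function () error("boom", 0) end, handler)
+     print(ok, result)     --> false   handled: boom
+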

5.2 - Coroutine Manipulation

+ +

+The operations related to coroutines comprise a sub-library of +the basic library and come inside the table coroutine. +See §2.11 for a general description of coroutines. + + +

+


coroutine.create (f)

+ + +

+Creates a new coroutine, with body f. +f must be a Lua function. +Returns this new coroutine, +an object with type "thread". + + + + +

+


coroutine.resume (co [, val1, ···])

+ + +

+Starts or continues the execution of coroutine co. +The first time you resume a coroutine, +it starts running its body. +The values val1, ··· are passed +as the arguments to the body function. +If the coroutine has yielded, +resume restarts it; +the values val1, ··· are passed +as the results from the yield. + + +

+If the coroutine runs without any errors, +resume returns true plus any values passed to yield +(if the coroutine yields) or any values returned by the body function +(if the coroutine terminates). +If there is any error, +resume returns false plus the error message. + + + + +

+


coroutine.running ()

+ + +

+Returns the running coroutine, +or nil when called by the main thread. + + + + +

+


coroutine.status (co)

+ + +

+Returns the status of coroutine co, as a string: +"running", +if the coroutine is running (that is, it called status); +"suspended", if the coroutine is suspended in a call to yield, +or if it has not started running yet; +"normal" if the coroutine is active but not running +(that is, it has resumed another coroutine); +and "dead" if the coroutine has finished its body function, +or if it has stopped with an error. + + + + +

+


coroutine.wrap (f)

+ + +

+Creates a new coroutine, with body f. +f must be a Lua function. +Returns a function that resumes the coroutine each time it is called. +Any arguments passed to the function behave as the +extra arguments to resume. +Returns the same values returned by resume, +except the first boolean. +In case of error, propagates the error. + + + + +

+


coroutine.yield (···)

+ + +

+Suspends the execution of the calling coroutine. +The coroutine cannot be running a C function, +a metamethod, or an iterator. +Any arguments to yield are passed as extra results to resume. + + + + + + + +
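+As an illustration of create, resume, yield, and status working together (the values are arbitrary):

+     local co = coroutine.create(function (a, b)
+       print("start", a, b)               --> start   1   2
+       local c = coroutine.yield(a + b)   -- the second resume supplies c
+       print("resumed with", c)           --> resumed with   10
+       return "done"
+     end)
+     
+     print(coroutine.resume(co, 1, 2))    --> true    3
+     print(coroutine.status(co))          --> suspended
+     print(coroutine.resume(co, 10))      --> true    done
+     print(coroutine.status(co))          --> dead
+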

5.3 - Modules

+ +

+The package library provides basic +facilities for loading and building modules in Lua. +It exports two of its functions directly in the global environment: +require and module. +Everything else is exported in a table package. + + +

+


module (name [, ···])

+ + +

+Creates a module. +If there is a table in package.loaded[name], +this table is the module. +Otherwise, if there is a global table t with the given name, +this table is the module. +Otherwise creates a new table t and +sets it as the value of the global name and +the value of package.loaded[name]. +This function also initializes t._NAME with the given name, +t._M with the module (t itself), +and t._PACKAGE with the package name +(the full module name minus last component; see below). +Finally, module sets t as the new environment +of the current function and the new value of package.loaded[name], +so that require returns t. + + +

+If name is a compound name +(that is, one with components separated by dots), +module creates (or reuses, if they already exist) +tables for each component. +For instance, if name is a.b.c, +then module stores the module table in field c of +field b of global a. + + +

+This function can receive optional options after +the module name, +where each option is a function to be applied over the module. + + + + +
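+As an illustration, a minimal module sketch; the file name mylib.lua is arbitrary and must be reachable through package.path:

+     -- contents of mylib.lua (hypothetical file)
+     module("mylib", package.seeall)
+     
+     function greet(name)
+       return "hello " .. name
+     end
+     
+     -- in another chunk:
+     -- local mylib = require("mylib")
+     -- print(mylib.greet("Lua"))   --> hello Lua
+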

+


require (modname)

+ + +

+Loads the given module. +The function starts by looking into the package.loaded table +to determine whether modname is already loaded. +If it is, then require returns the value stored +at package.loaded[modname]. +Otherwise, it tries to find a loader for the module. + + +

+To find a loader, +require is guided by the package.loaders array. +By changing this array, +we can change how require looks for a module. +The following explanation is based on the default configuration +for package.loaders. + + +

+First require queries package.preload[modname]. +If it has a value, +this value (which should be a function) is the loader. +Otherwise require searches for a Lua loader using the +path stored in package.path. +If that also fails, it searches for a C loader using the +path stored in package.cpath. +If that also fails, +it tries an all-in-one loader (see package.loaders). + + +

+Once a loader is found, +require calls the loader with a single argument, modname. +If the loader returns any value, +require assigns the returned value to package.loaded[modname]. +If the loader returns no value and +has not assigned any value to package.loaded[modname], +then require assigns true to this entry. +In any case, require returns the +final value of package.loaded[modname]. + + +

+If there is any error loading or running the module, +or if it cannot find any loader for the module, +then require signals an error. + + + + +

+


package.cpath

+ + +

+The path used by require to search for a C loader. + + +

+Lua initializes the C path package.cpath in the same way +it initializes the Lua path package.path, +using the environment variable LUA_CPATH +or a default path defined in luaconf.h. + + + + +

+ +


package.loaded

+ + +

+A table used by require to control which +modules are already loaded. +When you require a module modname and +package.loaded[modname] is not false, +require simply returns the value stored there. + + + + +

+


package.loaders

+ + +

+A table used by require to control how to load modules. + + +

+Each entry in this table is a searcher function. +When looking for a module, +require calls each of these searchers in ascending order, +with the module name (the argument given to require) as its +sole parameter. +The function can return another function (the module loader) +or a string explaining why it did not find that module +(or nil if it has nothing to say). +Lua initializes this table with four functions. + + +

+The first searcher simply looks for a loader in the +package.preload table. + + +

+The second searcher looks for a loader as a Lua library, +using the path stored at package.path. +A path is a sequence of templates separated by semicolons. +For each template, +the searcher replaces each question mark in the template with filename, +which is the module name with each dot replaced by a +"directory separator" (such as "/" in Unix); +then it will try to open the resulting file name. +So, for instance, if the Lua path is the string + +

+     "./?.lua;./?.lc;/usr/local/?/init.lua"
+

+the search for a Lua file for module foo +will try to open the files +./foo.lua, ./foo.lc, and +/usr/local/foo/init.lua, in that order. + + +

+The third searcher looks for a loader as a C library, +using the path given by the variable package.cpath. +For instance, +if the C path is the string + +

+     "./?.so;./?.dll;/usr/local/?/init.so"
+

+the searcher for module foo +will try to open the files ./foo.so, ./foo.dll, +and /usr/local/foo/init.so, in that order. +Once it finds a C library, +this searcher first uses a dynamic link facility to link the +application with the library. +Then it tries to find a C function inside the library to +be used as the loader. +The name of this C function is the string "luaopen_" +concatenated with a copy of the module name where each dot +is replaced by an underscore. +Moreover, if the module name has a hyphen, +its prefix up to (and including) the first hyphen is removed. +For instance, if the module name is a.v1-b.c, +the function name will be luaopen_b_c. + + +

+The fourth searcher tries an all-in-one loader. +It searches the C path for a library for +the root name of the given module. +For instance, when requiring a.b.c, +it will search for a C library for a. +If found, it looks into it for an open function for +the submodule; +in our example, that would be luaopen_a_b_c. +With this facility, a package can pack several C submodules +into one single library, +with each submodule keeping its original open function. + + + + +

+


package.loadlib (libname, funcname)

+ + +

+Dynamically links the host program with the C library libname. +Inside this library, looks for a function funcname +and returns this function as a C function. +(So, funcname must follow the protocol (see lua_CFunction)). + + +

+This is a low-level function. +It completely bypasses the package and module system. +Unlike require, +it does not perform any path searching and +does not automatically add extensions. +libname must be the complete file name of the C library, +including, if necessary, a path and extension. +funcname must be the exact name exported by the C library +(which may depend on the C compiler and linker used). + + +

+This function is not supported by ANSI C. +As such, it is only available on some platforms +(Windows, Linux, Mac OS X, Solaris, BSD, +plus other Unix systems that support the dlfcn standard). + + + + +

+


package.path

+ + +

+The path used by require to search for a Lua loader. + + +

+At start-up, Lua initializes this variable with +the value of the environment variable LUA_PATH or +with a default path defined in luaconf.h, +if the environment variable is not defined. +Any ";;" in the value of the environment variable +is replaced by the default path. + + + + +

+


package.preload

+ + +

+A table to store loaders for specific modules +(see require). + + + + +
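+As an illustration, the following sketch (the module name is arbitrary) registers a loader in package.preload and then requires it:

+     package.preload["answer"] = function (modname)
+       return { value = 42 }     -- becomes package.loaded["answer"]
+     end
+     
+     local answer = require("answer")
+     print(answer.value)         --> 42
+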

+


package.seeall (module)

+ + +

+Sets a metatable for module with +its __index field referring to the global environment, +so that this module inherits values +from the global environment. +To be used as an option to function module. + + + + + + + +

5.4 - String Manipulation

+ +

+This library provides generic functions for string manipulation, +such as finding and extracting substrings, and pattern matching. +When indexing a string in Lua, the first character is at position 1 +(not at 0, as in C). +Indices are allowed to be negative and are interpreted as indexing backwards, +from the end of the string. +Thus, the last character is at position -1, and so on. + + +

+The string library provides all its functions inside the table +string. +It also sets a metatable for strings +where the __index field points to the string table. +Therefore, you can use the string functions in object-oriented style. +For instance, string.byte(s, i) +can be written as s:byte(i). + + +

+The string library assumes one-byte character encodings. + + +

+


string.byte (s [, i [, j]])

+Returns the internal numerical codes of the characters s[i], +s[i+1], ···, s[j]. +The default value for i is 1; +the default value for j is i. + + +

+Note that numerical codes are not necessarily portable across platforms. + + + + +

+


string.char (···)

+Receives zero or more integers. +Returns a string with length equal to the number of arguments, +in which each character has the internal numerical code equal +to its corresponding argument. + + +

+Note that numerical codes are not necessarily portable across platforms. + + + + +

+


string.dump (function)

+ + +

+Returns a string containing a binary representation of the given function, +so that a later loadstring on this string returns +a copy of the function. +function must be a Lua function without upvalues. + + + + +

+


string.find (s, pattern [, init [, plain]])

+Looks for the first match of +pattern in the string s. +If it finds a match, then find returns the indices of s +where this occurrence starts and ends; +otherwise, it returns nil. +A third, optional numerical argument init specifies +where to start the search; +its default value is 1 and can be negative. +A value of true as a fourth, optional argument plain +turns off the pattern matching facilities, +so the function does a plain "find substring" operation, +with no characters in pattern being considered "magic". +Note that if plain is given, then init must be given as well. + + +

+If the pattern has captures, +then in a successful match +the captured values are also returned, +after the two indices. + + + + +
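+As an illustration (the subject strings are arbitrary):

+     print(string.find("hello Lua", "Lua"))        --> 7    9
+     print(string.find("key = value", "(%w+)%s*=%s*(%w+)"))
+                                                   --> 1    11    key    value
+     print(string.find("a.b.c", "."))              --> 1    1   ('.' matches any character)
+     print(string.find("a.b.c", ".", 1, true))     --> 2    2   (plain substring search)
+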

+


string.format (formatstring, ···)

+Returns a formatted version of its variable number of arguments +following the description given in its first argument (which must be a string). +The format string follows the same rules as the printf family of +standard C functions. +The only differences are that the options/modifiers +*, l, L, n, p, +and h are not supported +and that there is an extra option, q. +The q option formats a string in a form suitable to be safely read +back by the Lua interpreter: +the string is written between double quotes, +and all double quotes, newlines, embedded zeros, +and backslashes in the string +are correctly escaped when written. +For instance, the call + +
+     string.format('%q', 'a string with "quotes" and \n new line')
+

+will produce the string: + +

+     "a string with \"quotes\" and \
+      new line"
+
+ +

+The options c, d, E, e, f, +g, G, i, o, u, X, and x all +expect a number as argument, +whereas q and s expect a string. + + +

+This function does not accept string values +containing embedded zeros, +except as arguments to the q option. + + + + +

+


string.gmatch (s, pattern)

+Returns an iterator function that, +each time it is called, +returns the next captures from pattern over string s. +If pattern specifies no captures, +then the whole match is produced in each call. + + +

+As an example, the following loop + +

+     s = "hello world from Lua"
+     for w in string.gmatch(s, "%a+") do
+       print(w)
+     end
+

+will iterate over all the words from string s, +printing one per line. +The next example collects all pairs key=value from the +given string into a table: + +

+     t = {}
+     s = "from=world, to=Lua"
+     for k, v in string.gmatch(s, "(%w+)=(%w+)") do
+       t[k] = v
+     end
+
+ +

+For this function, a '^' at the start of a pattern does not +work as an anchor, as this would prevent the iteration. + + + + +

+


string.gsub (s, pattern, repl [, n])

+Returns a copy of s +in which all (or the first n, if given) +occurrences of the pattern have been +replaced by a replacement string specified by repl, +which can be a string, a table, or a function. +gsub also returns, as its second value, +the total number of matches that occurred. + + +

+If repl is a string, then its value is used for replacement. +The character % works as an escape character: +any sequence in repl of the form %n, +with n between 1 and 9, +stands for the value of the n-th captured substring (see below). +The sequence %0 stands for the whole match. +The sequence %% stands for a single %. + + +

+If repl is a table, then the table is queried for every match, +using the first capture as the key; +if the pattern specifies no captures, +then the whole match is used as the key. + + +

+If repl is a function, then this function is called every time a +match occurs, with all captured substrings passed as arguments, +in order; +if the pattern specifies no captures, +then the whole match is passed as a sole argument. + + +

+If the value returned by the table query or by the function call +is a string or a number, +then it is used as the replacement string; +otherwise, if it is false or nil, +then there is no replacement +(that is, the original match is kept in the string). + + +

+Here are some examples: + +

+     x = string.gsub("hello world", "(%w+)", "%1 %1")
+     --> x="hello hello world world"
+     
+     x = string.gsub("hello world", "%w+", "%0 %0", 1)
+     --> x="hello hello world"
+     
+     x = string.gsub("hello world from Lua", "(%w+)%s*(%w+)", "%2 %1")
+     --> x="world hello Lua from"
+     
+     x = string.gsub("home = $HOME, user = $USER", "%$(%w+)", os.getenv)
+     --> x="home = /home/roberto, user = roberto"
+     
+     x = string.gsub("4+5 = $return 4+5$", "%$(.-)%$", function (s)
+           return loadstring(s)()
+         end)
+     --> x="4+5 = 9"
+     
+     local t = {name="lua", version="5.1"}
+     x = string.gsub("$name-$version.tar.gz", "%$(%w+)", t)
+     --> x="lua-5.1.tar.gz"
+
+ + + +

+


string.len (s)

+Receives a string and returns its length. +The empty string "" has length 0. +Embedded zeros are counted, +so "a\000bc\000" has length 5. + + + + +

+


string.lower (s)

+Receives a string and returns a copy of this string with all +uppercase letters changed to lowercase. +All other characters are left unchanged. +The definition of what an uppercase letter is depends on the current locale. + + + + +

+


string.match (s, pattern [, init])

+Looks for the first match of +pattern in the string s. +If it finds one, then match returns +the captures from the pattern; +otherwise it returns nil. +If pattern specifies no captures, +then the whole match is returned. +A third, optional numerical argument init specifies +where to start the search; +its default value is 1 and can be negative. + + + + +

+


string.rep (s, n)

+Returns a string that is the concatenation of n copies of +the string s. + + + + +

+


string.reverse (s)

+Returns a string that is the string s reversed. + + + + +

+


string.sub (s, i [, j])

+Returns the substring of s that +starts at i and continues until j; +i and j can be negative. +If j is absent, then it is assumed to be equal to -1 +(which is the same as the string length). +In particular, +the call string.sub(s,1,j) returns a prefix of s +with length j, +and string.sub(s, -i) returns a suffix of s +with length i. + + + + +

+


string.upper (s)

+Receives a string and returns a copy of this string with all +lowercase letters changed to uppercase. +All other characters are left unchanged. +The definition of what a lowercase letter is depends on the current locale. + + + +

5.4.1 - Patterns

+ + +

Character Class:

+A character class is used to represent a set of characters. +The following combinations are allowed in describing a character class: + +

    + +
  • x: +(where x is not one of the magic characters +^$()%.[]*+-?) +represents the character x itself. +
  • + +
  • .: (a dot) represents all characters.
  • + +
  • %a: represents all letters.
  • + +
  • %c: represents all control characters.
  • + +
  • %d: represents all digits.
  • + +
  • %l: represents all lowercase letters.
  • + +
  • %p: represents all punctuation characters.
  • + +
  • %s: represents all space characters.
  • + +
  • %u: represents all uppercase letters.
  • + +
  • %w: represents all alphanumeric characters.
  • + +
  • %x: represents all hexadecimal digits.
  • + +
  • %z: represents the character with representation 0.
  • + +
  • %x: (where x is any non-alphanumeric character) +represents the character x. +This is the standard way to escape the magic characters. +Any punctuation character (even the non magic) +can be preceded by a '%' +when used to represent itself in a pattern. +
  • + +
  • [set]: +represents the class which is the union of all +characters in set. +A range of characters can be specified by +separating the end characters of the range with a '-'. +All classes %x described above can also be used as +components in set. +All other characters in set represent themselves. +For example, [%w_] (or [_%w]) +represents all alphanumeric characters plus the underscore, +[0-7] represents the octal digits, +and [0-7%l%-] represents the octal digits plus +the lowercase letters plus the '-' character. + + +

    +The interaction between ranges and classes is not defined. +Therefore, patterns like [%a-z] or [a-%%] +have no meaning. +

  • + +
  • [^set]: +represents the complement of set, +where set is interpreted as above. +
  • + +

+For all classes represented by single letters (%a, %c, etc.), +the corresponding uppercase letter represents the complement of the class. +For instance, %S represents all non-space characters. + + +

+The definitions of letter, space, and other character groups +depend on the current locale. +In particular, the class [a-z] may not be equivalent to %l. + + + + + +

Pattern Item:

+A pattern item can be + +

    + +
  • +a single character class, +which matches any single character in the class; +
  • + +
  • +a single character class followed by '*', +which matches 0 or more repetitions of characters in the class. +These repetition items will always match the longest possible sequence; +
  • + +
  • +a single character class followed by '+', +which matches 1 or more repetitions of characters in the class. +These repetition items will always match the longest possible sequence; +
  • + +
  • +a single character class followed by '-', +which also matches 0 or more repetitions of characters in the class. +Unlike '*', +these repetition items will always match the shortest possible sequence; +
  • + +
  • +a single character class followed by '?', +which matches 0 or 1 occurrence of a character in the class; +
  • + +
  • +%n, for n between 1 and 9; +such item matches a substring equal to the n-th captured string +(see below); +
  • + +
  • +%bxy, where x and y are two distinct characters; +such item matches strings that start with x, end with y, +and where the x and y are balanced. +This means that, if one reads the string from left to right, +counting +1 for an x and -1 for a y, +the ending y is the first y where the count reaches 0. +For instance, the item %b() matches expressions with +balanced parentheses. +
  • + +
+ + + + +

Pattern:

+A pattern is a sequence of pattern items. +A '^' at the beginning of a pattern anchors the match at the +beginning of the subject string. +A '$' at the end of a pattern anchors the match at the +end of the subject string. +At other positions, +'^' and '$' have no special meaning and represent themselves. + + + + + +

Captures:

+A pattern can contain sub-patterns enclosed in parentheses; +they describe captures. +When a match succeeds, the substrings of the subject string +that match captures are stored (captured) for future use. +Captures are numbered according to their left parentheses. +For instance, in the pattern "(a*(.)%w(%s*))", +the part of the string matching "a*(.)%w(%s*)" is +stored as the first capture (and therefore has number 1); +the character matching "." is captured with number 2, +and the part matching "%s*" has number 3. + + +

+As a special case, the empty capture () captures +the current string position (a number). +For instance, if we apply the pattern "()aa()" on the +string "flaaap", there will be two captures: 3 and 5. + + +

+A pattern cannot contain embedded zeros. Use %z instead. + + + + + + + + + + + +
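+As an illustration of captures and balanced matches (the subject strings are arbitrary):

+     print(string.match("key = value", "(%w+)%s*=%s*(%w+)"))   --> key    value
+     print(string.match("f(a, (b))", "%b()"))                  --> (a, (b))
+     print(string.match("flaaap", "()aa()"))                   --> 3    5
+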

5.5 - Table Manipulation

+This library provides generic functions for table manipulation. +It provides all its functions inside the table table. + + +

+Most functions in the table library assume that the table +represents an array or a list. +For these functions, when we talk about the "length" of a table +we mean the result of the length operator. + + +

+


table.concat (table [, sep [, i [, j]]])

+Given an array where all elements are strings or numbers, +returns table[i]..sep..table[i+1] ··· sep..table[j]. +The default value for sep is the empty string, +the default for i is 1, +and the default for j is the length of the table. +If i is greater than j, returns the empty string. + + + + +

+


table.insert (table, [pos,] value)

+ + +

+Inserts element value at position pos in table, +shifting up other elements to open space, if necessary. +The default value for pos is n+1, +where n is the length of the table (see §2.5.5), +so that a call table.insert(t,x) inserts x at the end +of table t. + + + + +

+


table.maxn (table)

+ + +

+Returns the largest positive numerical index of the given table, +or zero if the table has no positive numerical indices. +(To do its job this function does a linear traversal of +the whole table.) + + + + +

+


table.remove (table [, pos])

+ + +

+Removes from table the element at position pos, +shifting down other elements to close the space, if necessary. +Returns the value of the removed element. +The default value for pos is n, +where n is the length of the table, +so that a call table.remove(t) removes the last element +of table t. + + + + +
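+As an illustration of table.insert and table.remove used together (the values are arbitrary):

+     local t = {}
+     table.insert(t, "a")            -- append at the end
+     table.insert(t, "b")
+     table.insert(t, 1, "first")     -- insert at position 1, shifting the rest up
+     print(table.concat(t, ","))     --> first,a,b
+     print(table.remove(t))          --> b       (removes the last element)
+     print(table.remove(t, 1))       --> first
+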

+


table.sort (table [, comp])

+Sorts table elements in a given order, in-place, +from table[1] to table[n], +where n is the length of the table. +If comp is given, +then it must be a function that receives two table elements, +and returns true +when the first is less than the second +(so that not comp(a[i+1],a[i]) will be true after the sort). +If comp is not given, +then the standard Lua operator < is used instead. + + +

+The sort algorithm is not stable; +that is, elements considered equal by the given order +may have their relative positions changed by the sort. + + + + + + + +
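+As an illustration (the array contents are arbitrary):

+     local t = { "banana", "apple", "cherry" }
+     table.sort(t)                                      -- default order, using <
+     print(table.concat(t, " "))                        --> apple banana cherry
+     
+     table.sort(t, function (a, b) return a > b end)    -- descending order
+     print(table.concat(t, " "))                        --> cherry banana apple
+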

5.6 - Mathematical Functions

+ +

+This library is an interface to the standard C math library. +It provides all its functions inside the table math. + + +

+


math.abs (x)

+ + +

+Returns the absolute value of x. + + + + +

+


math.acos (x)

+ + +

+Returns the arc cosine of x (in radians). + + + + +

+


math.asin (x)

+ + +

+Returns the arc sine of x (in radians). + + + + +

+


math.atan (x)

+ + +

+Returns the arc tangent of x (in radians). + + + + +

+


math.atan2 (y, x)

+ + +

+Returns the arc tangent of y/x (in radians), +but uses the signs of both parameters to find the +quadrant of the result. +(It also handles correctly the case of x being zero.) + + + + +

+


math.ceil (x)

+ + +

+Returns the smallest integer larger than or equal to x. + + + + +

+


math.cos (x)

+ + +

+Returns the cosine of x (assumed to be in radians). + + + + +

+


math.cosh (x)

+ + +

+Returns the hyperbolic cosine of x. + + + + +

+


math.deg (x)

+ + +

+Returns the angle x (given in radians) in degrees. + + + + +

+


math.exp (x)

+ + +

+Returns the value e^x. + + + +

+


math.floor (x)

+ + +

+Returns the largest integer smaller than or equal to x. + + + + +

+


math.fmod (x, y)

+ + +

+Returns the remainder of the division of x by y +that rounds the quotient towards zero. + + + + +

+


math.frexp (x)

+ + +

+Returns m and e such that x = m*2^e, +e is an integer and the absolute value of m is +in the range [0.5, 1) +(or zero when x is zero). + + + +

+


math.huge

+ + +

+The value HUGE_VAL, +a value larger than or equal to any other numerical value. + + + + +

+


math.ldexp (m, e)

+ + +

+Returns m*2^e (e should be an integer). + + + +

+


math.log (x)

+ + +

+Returns the natural logarithm of x. + + + + +

+


math.log10 (x)

+ + +

+Returns the base-10 logarithm of x. + + + + +

+


math.max (x, ···)

+ + +

+Returns the maximum value among its arguments. + + + + +

+


math.min (x, ···)

+ + +

+Returns the minimum value among its arguments. + + + + +

+


math.modf (x)

+ + +

+Returns two numbers, +the integral part of x and the fractional part of x. + + + + +

+


math.pi

+ + +

+The value of pi. + + + + +

+


math.pow (x, y)

+ + +

+Returns x^y. +(You can also use the expression x^y to compute this value.) + + + +

+


math.rad (x)

+ + +

+Returns the angle x (given in degrees) in radians. + + + + +

+


math.random ([m [, n]])

+ + +

+This function is an interface to the simple +pseudo-random generator function rand provided by ANSI C. +(No guarantees can be given for its statistical properties.) + + +

+When called without arguments, +returns a uniform pseudo-random real number +in the range [0,1). +When called with an integer number m, +math.random returns +a uniform pseudo-random integer in the range [1, m]. +When called with two integer numbers m and n, +math.random returns a uniform pseudo-random +integer in the range [m, n]. + + + + +
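+As an illustration (seeding with os.time is a common but arbitrary choice):

+     math.randomseed(os.time())     -- seed the generator once
+     print(math.random())           -- a real number in [0,1)
+     print(math.random(6))          -- an integer in [1,6]
+     print(math.random(10, 20))     -- an integer in [10,20]
+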

+


math.randomseed (x)

+ + +

+Sets x as the "seed" +for the pseudo-random generator: +equal seeds produce equal sequences of numbers. + + + + +

+


math.sin (x)

+ + +

+Returns the sine of x (assumed to be in radians). + + + + +

+


math.sinh (x)

+ + +

+Returns the hyperbolic sine of x. + + + + +

+


math.sqrt (x)

+ + +

+Returns the square root of x. +(You can also use the expression x^0.5 to compute this value.) + + + + +

+


math.tan (x)

+ + +

+Returns the tangent of x (assumed to be in radians). + + + + +

+


math.tanh (x)

+ + +

+Returns the hyperbolic tangent of x. + + + + + + + +

5.7 - Input and Output Facilities

+ +

+The I/O library provides two different styles for file manipulation. +The first one uses implicit file descriptors; +that is, there are operations to set a default input file and a +default output file, +and all input/output operations are over these default files. +The second style uses explicit file descriptors. + + +

+When using implicit file descriptors, +all operations are supplied by table io. +When using explicit file descriptors, +the operation io.open returns a file descriptor +and then all operations are supplied as methods of the file descriptor. + + +

+The table io also provides +three predefined file descriptors with their usual meanings from C: +io.stdin, io.stdout, and io.stderr. +The I/O library never closes these files. + + +

+Unless otherwise stated, +all I/O functions return nil on failure +(plus an error message as a second result and +a system-dependent error code as a third result) +and some value different from nil on success. + + +
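+As an illustration of the two styles (the file name example.txt is arbitrary):

+     -- implicit descriptors: operations on the default output file
+     io.write("implicit style\n")
+     
+     -- explicit descriptors: operations as methods of a file handle
+     local f = assert(io.open("example.txt", "w"))
+     f:write("explicit style\n")
+     f:close()
+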

+


io.close ([file])

+ + +

+Equivalent to file:close(). +Without a file, closes the default output file. + + + + +

+


io.flush ()

+ + +

+Equivalent to file:flush over the default output file. + + + + +

+


io.input ([file])

+ + +

+When called with a file name, it opens the named file (in text mode), +and sets its handle as the default input file. +When called with a file handle, +it simply sets this file handle as the default input file. +When called without parameters, +it returns the current default input file. + + +

+In case of errors this function raises the error, +instead of returning an error code. + + + + +

+


io.lines ([filename])

+ + +

+Opens the given file name in read mode +and returns an iterator function that, +each time it is called, +returns a new line from the file. +Therefore, the construction + +

+     for line in io.lines(filename) do body end
+

+will iterate over all lines of the file. +When the iterator function detects the end of file, +it returns nil (to finish the loop) and automatically closes the file. + + +

+The call io.lines() (with no file name) is equivalent +to io.input():lines(); +that is, it iterates over the lines of the default input file. +In this case it does not close the file when the loop ends. + + + + +

+


io.open (filename [, mode])

+ + +

+This function opens a file, +in the mode specified in the string mode. +It returns a new file handle, +or, in case of errors, nil plus an error message. + + +

+The mode string can be any of the following: + +

    +
  • "r": read mode (the default);
  • +
  • "w": write mode;
  • +
  • "a": append mode;
  • +
  • "r+": update mode, all previous data is preserved;
  • +
  • "w+": update mode, all previous data is erased;
  • +
  • "a+": append update mode, previous data is preserved, + writing is only allowed at the end of file.
  • +

+The mode string can also have a 'b' at the end, +which is needed in some systems to open the file in binary mode. +This string is exactly what is used in the +standard C function fopen. + + + + +

+


io.output ([file])

+ + +

+Similar to io.input, but operates over the default output file. + + + + +

+


io.popen (prog [, mode])

+ + +

+Starts program prog in a separate process and returns
+a file handle that you can use to read data from this program
+(if mode is "r", the default)
+or to write data to this program
+(if mode is "w").

+This function is system dependent and is not available +on all platforms. + + + + +
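+For instance, on systems where it is available, the following
+illustrative snippet reads the output of a shell command:
+
+     local p = io.popen("ls", "r")          -- "ls" is just an example command
+     if p then
+       for line in p:lines() do             -- read the program output line by line
+         print(line)
+       end
+       p:close()
+     end
+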

+


io.read (···)

+ + +

+Equivalent to io.input():read. + + + + +

+


io.tmpfile ()

+ + +

+Returns a handle for a temporary file. +This file is opened in update mode +and it is automatically removed when the program ends. + + + + +

+


io.type (obj)

+ + +

+Checks whether obj is a valid file handle. +Returns the string "file" if obj is an open file handle, +"closed file" if obj is a closed file handle, +or nil if obj is not a file handle. + + + + +

+


io.write (···)

+ + +

+Equivalent to io.output():write. + + + + +

+


file:close ()

+ + +

+Closes file. +Note that files are automatically closed when +their handles are garbage collected, +but that takes an unpredictable amount of time to happen. + + + + +

+


file:flush ()

+ + +

+Saves any written data to file. + + + + +

+


file:lines ()

+ + +

+Returns an iterator function that, +each time it is called, +returns a new line from the file. +Therefore, the construction + +

+     for line in file:lines() do body end
+

+will iterate over all lines of the file. +(Unlike io.lines, this function does not close the file +when the loop ends.) + + + + +

+


file:read (···)

+ + +

+Reads the file file, +according to the given formats, which specify what to read. +For each format, +the function returns a string (or a number) with the characters read, +or nil if it cannot read data with the specified format. +When called without formats, +it uses a default format that reads the entire next line +(see below). + + +

+The available formats are + +

    + +
  • "*n": +reads a number; +this is the only format that returns a number instead of a string. +
  • + +
  • "*a": +reads the whole file, starting at the current position. +On end of file, it returns the empty string. +
  • + +
  • "*l": +reads the next line (skipping the end of line), +returning nil on end of file. +This is the default format. +
  • + +
  • number: +reads a string with up to this number of characters, +returning nil on end of file. +If number is zero, +it reads nothing and returns an empty string, +or nil on end of file. +
  • + +
+ + + +
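+For instance (an illustrative sketch, assuming f is an open file handle),
+
+     local n = f:read("*n")            -- reads a number
+     local line = f:read("*l")         -- reads the next line (the default format)
+     local rest = f:read("*a")         -- reads the rest of the file as a string
+     local header = f:read(4)          -- reads up to 4 characters
+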

+


file:seek ([whence] [, offset])

+ + +

+Sets and gets the file position, +measured from the beginning of the file, +to the position given by offset plus a base +specified by the string whence, as follows: + +

    +
  • "set": base is position 0 (beginning of the file);
  • +
  • "cur": base is current position;
  • +
  • "end": base is end of file;
  • +

+In case of success, function seek returns the final file position, +measured in bytes from the beginning of the file. +If this function fails, it returns nil, +plus a string describing the error. + + +

+The default value for whence is "cur", +and for offset is 0. +Therefore, the call file:seek() returns the current +file position, without changing it; +the call file:seek("set") sets the position to the +beginning of the file (and returns 0); +and the call file:seek("end") sets the position to the +end of the file, and returns its size. + + + + +
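+For instance, a common idiom (illustrative only, assuming f is an open
+file handle) obtains the size of a file without losing the current position:
+
+     local pos = f:seek()              -- current position, in bytes from the beginning
+     local size = f:seek("end")        -- jump to the end; the result is the file size
+     f:seek("set", pos)                -- restore the previous position
+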

+


file:setvbuf (mode [, size])

+ + +

+Sets the buffering mode for an output file. +There are three available modes: + +

    + +
  • "no": +no buffering; the result of any output operation appears immediately. +
  • + +
  • "full": +full buffering; output operation is performed only +when the buffer is full (or when you explicitly flush the file +(see io.flush)). +
  • + +
  • "line": +line buffering; output is buffered until a newline is output +or there is any input from some special files +(such as a terminal device). +
  • + +

+For the last two cases, size +specifies the size of the buffer, in bytes. +The default is an appropriate size. + + + + +

+


file:write (···)

+ + +

+Writes the value of each of its arguments to +the file. +The arguments must be strings or numbers. +To write other values, +use tostring or string.format before write. + + + + + + + +
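+For instance (an illustrative sketch; "log.txt" is just an example name),
+
+     local x = 3.14
+     local f = assert(io.open("log.txt", "a"))
+     f:write("x = ", x, "\n")          -- strings and numbers may be mixed freely
+     f:close()
+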

5.8 - Operating System Facilities

+ +

+This library is implemented through table os. + + +

+


os.clock ()

+ + +

+Returns an approximation of the amount in seconds of CPU time +used by the program. + + + + +

+


os.date ([format [, time]])

+ + +

+Returns a string or a table containing date and time, +formatted according to the given string format. + + +

+If the time argument is present, +this is the time to be formatted +(see the os.time function for a description of this value). +Otherwise, date formats the current time. + + +

+If format starts with '!', +then the date is formatted in Coordinated Universal Time. +After this optional character, +if format is the string "*t", +then date returns a table with the following fields: +year (four digits), month (1--12), day (1--31), +hour (0--23), min (0--59), sec (0--61), +wday (weekday, Sunday is 1), +yday (day of the year), +and isdst (daylight saving flag, a boolean). + + +

+If format is not "*t", +then date returns the date as a string, +formatted according to the same rules as the C function strftime. + + +

+When called without arguments, +date returns a reasonable date and time representation that depends on +the host system and on the current locale +(that is, os.date() is equivalent to os.date("%c")). + + + + +
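+For instance (illustrative only),
+
+     print(os.date("%Y-%m-%d %H:%M:%S"))    -- formatted with the strftime rules
+     local t = os.date("*t")                -- a table with year, month, day, ...
+     print(t.year, t.month, t.day)
+     print(os.date("!*t").hour)             -- hour in Coordinated Universal Time
+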

+


os.difftime (t2, t1)

+ + +

+Returns the number of seconds from time t1 to time t2. +In POSIX, Windows, and some other systems, +this value is exactly t2-t1. + + + + +

+


os.execute ([command])

+ + +

+This function is equivalent to the C function system. +It passes command to be executed by an operating system shell. +It returns a status code, which is system-dependent. +If command is absent, then it returns nonzero if a shell is available +and zero otherwise. + + + + +

+


os.exit ([code])

+ + +

+Calls the C function exit, +with an optional code, +to terminate the host program. +The default value for code is the success code. + + + + +

+


os.getenv (varname)

+ + +

+Returns the value of the process environment variable varname, +or nil if the variable is not defined. + + + + +

+


os.remove (filename)

+ + +

+Deletes the file or directory with the given name. +Directories must be empty to be removed. +If this function fails, it returns nil, +plus a string describing the error. + + + + +

+


os.rename (oldname, newname)

+ + +

+Renames file or directory named oldname to newname. +If this function fails, it returns nil, +plus a string describing the error. + + + + +

+


os.setlocale (locale [, category])

+ + +

+Sets the current locale of the program. +locale is a string specifying a locale; +category is an optional string describing which category to change: +"all", "collate", "ctype", +"monetary", "numeric", or "time"; +the default category is "all". +The function returns the name of the new locale, +or nil if the request cannot be honored. + + +

+If locale is the empty string, +the current locale is set to an implementation-defined native locale. +If locale is the string "C", +the current locale is set to the standard C locale. + + +

+When called with nil as the first argument, +this function only returns the name of the current locale +for the given category. + + + + +

+


os.time ([table])

+ + +

+Returns the current time when called without arguments, +or a time representing the date and time specified by the given table. +This table must have fields year, month, and day, +and may have fields hour, min, sec, and isdst +(for a description of these fields, see the os.date function). + + +

+The returned value is a number, whose meaning depends on your system. +In POSIX, Windows, and some other systems, this number counts the number +of seconds since some given start time (the "epoch"). +In other systems, the meaning is not specified, +and the number returned by time can be used only as an argument to +date and difftime. + + + + +
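+For instance, the following illustrative sketch computes the length of a
+day in seconds (on POSIX-like systems, and ignoring daylight saving
+transitions, this is typically 86400):
+
+     local t1 = os.time{year = 2000, month = 1, day = 1, hour = 0}
+     local t2 = os.time{year = 2000, month = 1, day = 2, hour = 0}
+     print(os.difftime(t2, t1))
+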

+


os.tmpname ()

+ + +

+Returns a string with a file name that can +be used for a temporary file. +The file must be explicitly opened before its use +and explicitly removed when no longer needed. + + +

+On some systems (POSIX), +this function also creates a file with that name, +to avoid security risks. +(Someone else might create the file with wrong permissions +in the time between getting the name and creating the file.) +You still have to open the file to use it +and to remove it (even if you do not use it). + + +

+When possible, +you may prefer to use io.tmpfile, +which automatically removes the file when the program ends. + + + + + + + +

5.9 - The Debug Library

+ +

+This library provides +the functionality of the debug interface to Lua programs. +You should exert care when using this library. +The functions provided here should be used exclusively for debugging +and similar tasks, such as profiling. +Please resist the temptation to use them as a +usual programming tool: +they can be very slow. +Moreover, several of these functions +violate some assumptions about Lua code +(e.g., that variables local to a function +cannot be accessed from outside or +that userdata metatables cannot be changed by Lua code) +and therefore can compromise otherwise secure code. + + +

+All functions in this library are provided +inside the debug table. +All functions that operate over a thread +have an optional first argument which is the +thread to operate over. +The default is always the current thread. + + +

+


debug.debug ()

+ + +

+Enters an interactive mode with the user, +running each string that the user enters. +Using simple commands and other debug facilities, +the user can inspect global and local variables, +change their values, evaluate expressions, and so on. +A line containing only the word cont finishes this function, +so that the caller continues its execution. + + +

+Note that commands for debug.debug are not lexically nested +within any function, and so have no direct access to local variables. + + + + +

+


debug.getfenv (o)

+Returns the environment of object o. + + + + +

+


debug.gethook ([thread])

+ + +

+Returns the current hook settings of the thread, as three values: +the current hook function, the current hook mask, +and the current hook count +(as set by the debug.sethook function). + + + + +

+


debug.getinfo ([thread,] function [, what])

+ + +

+Returns a table with information about a function. +You can give the function directly, +or you can give a number as the value of function, +which means the function running at level function of the call stack +of the given thread: +level 0 is the current function (getinfo itself); +level 1 is the function that called getinfo; +and so on. +If function is a number larger than the number of active functions, +then getinfo returns nil. + + +

+The returned table can contain all the fields returned by lua_getinfo, +with the string what describing which fields to fill in. +The default for what is to get all information available, +except the table of valid lines. +If present, +the option 'f' +adds a field named func with the function itself. +If present, +the option 'L' +adds a field named activelines with the table of +valid lines. + + +

+For instance, the expression debug.getinfo(1,"n").name returns +a table with a name for the current function, +if a reasonable name can be found, +and the expression debug.getinfo(print) +returns a table with all available information +about the print function. + + + + +
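+As a further illustrative sketch, the following helper (the name 'where'
+is hypothetical) reports the source and line of its caller:
+
+     local function where()
+       local info = debug.getinfo(2, "Sl")   -- level 2 is the caller of 'where'
+       return info.short_src .. ":" .. info.currentline
+     end
+     print(where())
+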

+


debug.getlocal ([thread,] level, local)

+ + +

+This function returns the name and the value of the local variable +with index local of the function at level level of the stack. +(The first parameter or local variable has index 1, and so on, +until the last active local variable.) +The function returns nil if there is no local +variable with the given index, +and raises an error when called with a level out of range. +(You can call debug.getinfo to check whether the level is valid.) + + +

+Variable names starting with '(' (open parentheses) +represent internal variables +(loop control variables, temporaries, and C function locals). + + + + +

+


debug.getmetatable (object)

+ + +

+Returns the metatable of the given object +or nil if it does not have a metatable. + + + + +

+


debug.getregistry ()

+ + +

+Returns the registry table (see §3.5). + + + + +

+


debug.getupvalue (func, up)

+ + +

+This function returns the name and the value of the upvalue +with index up of the function func. +The function returns nil if there is no upvalue with the given index. + + + + +

+


debug.setfenv (object, table)

+ + +

+Sets the environment of the given object to the given table. +Returns object. + + + + +

+


debug.sethook ([thread,] hook, mask [, count])

+ + +

+Sets the given function as a hook. +The string mask and the number count describe +when the hook will be called. +The string mask may have the following characters, +with the given meaning: + +

    +
  • "c": the hook is called every time Lua calls a function;
  • +
  • "r": the hook is called every time Lua returns from a function;
  • +
  • "l": the hook is called every time Lua enters a new line of code.
  • +

+With a count different from zero, +the hook is called after every count instructions. + + +

+When called without arguments, +debug.sethook turns off the hook. + + +

+When the hook is called, its first parameter is a string +describing the event that has triggered its call: +"call", "return" (or "tail return", +when simulating a return from a tail call), +"line", and "count". +For line events, +the hook also gets the new line number as its second parameter. +Inside a hook, +you can call getinfo with level 2 to get more information about +the running function +(level 0 is the getinfo function, +and level 1 is the hook function), +unless the event is "tail return". +In this case, Lua is only simulating the return, +and a call to getinfo will return invalid data. + + + + +
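+For instance, the following illustrative hook prints every line executed,
+and is then turned off:
+
+     debug.sethook(function (event, line)
+       local info = debug.getinfo(2, "S")    -- level 2 is the running function
+       print(info.short_src .. ":" .. line)
+     end, "l")                                -- "l" mask: a line event for each new line
+
+     -- ... code executed here is traced ...
+
+     debug.sethook()                          -- no arguments: turns the hook off
+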

+


debug.setlocal ([thread,] level, local, value)

+ + +

+This function assigns the value value to the local variable +with index local of the function at level level of the stack. +The function returns nil if there is no local +variable with the given index, +and raises an error when called with a level out of range. +(You can call getinfo to check whether the level is valid.) +Otherwise, it returns the name of the local variable. + + + + +

+


debug.setmetatable (object, table)

+ + +

+Sets the metatable for the given object to the given table +(which can be nil). + + + + +

+


debug.setupvalue (func, up, value)

+ + +

+This function assigns the value value to the upvalue +with index up of the function func. +The function returns nil if there is no upvalue +with the given index. +Otherwise, it returns the name of the upvalue. + + + + +

+


debug.traceback ([thread,] [message [, level]])

+ + +

+Returns a string with a traceback of the call stack. +An optional message string is appended +at the beginning of the traceback. +An optional level number tells at which level +to start the traceback +(default is 1, the function calling traceback). + + + + + + + +
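+For instance, a common illustrative use is as the error handler of xpcall,
+so that error messages carry a traceback:
+
+     local ok, err = xpcall(function ()
+       error("something failed")              -- just an example error
+     end, debug.traceback)
+     if not ok then print(err) end            -- the message plus the call stack
+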

6 - Lua Stand-alone

+ +

+Although Lua has been designed as an extension language, +to be embedded in a host C program, +it is also frequently used as a stand-alone language. +An interpreter for Lua as a stand-alone language, +called simply lua, +is provided with the standard distribution. +The stand-alone interpreter includes +all standard libraries, including the debug library. +Its usage is: + +

+     lua [options] [script [args]]
+

+The options are: + +

    +
  • -e stat: executes string stat;
  • +
  • -l mod: "requires" mod;
  • +
  • -i: enters interactive mode after running script;
  • +
  • -v: prints version information;
  • +
  • --: stops handling options;
  • +
  • -: executes stdin as a file and stops handling options.
  • +

+After handling its options, lua runs the given script, +passing to it the given args as string arguments. +When called without arguments, +lua behaves as lua -v -i +when the standard input (stdin) is a terminal, +and as lua - otherwise. + + +

+Before running any argument, +the interpreter checks for an environment variable LUA_INIT. +If its format is @filename, +then lua executes the file. +Otherwise, lua executes the string itself. + + +

+All options are handled in order, except -i. +For instance, an invocation like + +

+     $ lua -e'a=1' -e 'print(a)' script.lua
+

+will first set a to 1, then print the value of a (which is '1'), +and finally run the file script.lua with no arguments. +(Here $ is the shell prompt. Your prompt may be different.) + + +

+Before starting to run the script, +lua collects all arguments in the command line +in a global table called arg. +The script name is stored at index 0, +the first argument after the script name goes to index 1, +and so on. +Any arguments before the script name +(that is, the interpreter name plus the options) +go to negative indices. +For instance, in the call + +

+     $ lua -la b.lua t1 t2
+

+the interpreter first runs the file a.lua, +then creates a table + +

+     arg = { [-2] = "lua", [-1] = "-la",
+             [0] = "b.lua",
+             [1] = "t1", [2] = "t2" }
+

+and finally runs the file b.lua. +The script is called with arg[1], arg[2], ··· +as arguments; +it can also access these arguments with the vararg expression '...'. + + +

+In interactive mode, +if you write an incomplete statement, +the interpreter waits for its completion +by issuing a different prompt. + + +

+If the global variable _PROMPT contains a string, +then its value is used as the prompt. +Similarly, if the global variable _PROMPT2 contains a string, +its value is used as the secondary prompt +(issued during incomplete statements). +Therefore, both prompts can be changed directly on the command line +or in any Lua programs by assigning to _PROMPT. +See the next example: + +

+     $ lua -e"_PROMPT='myprompt> '" -i
+

+(The outer pair of quotes is for the shell, +the inner pair is for Lua.) +Note the use of -i to enter interactive mode; +otherwise, +the program would just end silently +right after the assignment to _PROMPT. + + +

+To allow the use of Lua as a +script interpreter in Unix systems, +the stand-alone interpreter skips +the first line of a chunk if it starts with #. +Therefore, Lua scripts can be made into executable programs +by using chmod +x and the #! form, +as in + +

+     #!/usr/local/bin/lua
+

+(Of course, +the location of the Lua interpreter may be different in your machine. +If lua is in your PATH, +then + +

+     #!/usr/bin/env lua
+

+is a more portable solution.) + + + +

7 - Incompatibilities with the Previous Version

+ +

+Here we list the incompatibilities that you may find when moving a program
+from Lua 5.0 to Lua 5.1.
+You can avoid most of the incompatibilities by compiling Lua with
+appropriate options (see file luaconf.h).
+However,
+all these compatibility options will be removed in the next version of Lua.

7.1 - Changes in the Language

+
    + +
  • +The vararg system changed from the pseudo-argument arg with a +table with the extra arguments to the vararg expression. +(See compile-time option LUA_COMPAT_VARARG in luaconf.h.) +
  • + +
  • +There was a subtle change in the scope of the implicit +variables of the for statement and for the repeat statement. +
  • + +
  • +The long string/long comment syntax ([[string]]) +does not allow nesting. +You can use the new syntax ([=[string]=]) in these cases. +(See compile-time option LUA_COMPAT_LSTR in luaconf.h.) +
  • + +
+ + + + +

7.2 - Changes in the Libraries

+
    + +
  • +Function string.gfind was renamed string.gmatch. +(See compile-time option LUA_COMPAT_GFIND in luaconf.h.) +
  • + +
  • +When string.gsub is called with a function as its +third argument, +whenever this function returns nil or false the +replacement string is the whole match, +instead of the empty string. +
  • + +
  • +Function table.setn was deprecated. +Function table.getn corresponds +to the new length operator (#); +use the operator instead of the function. +(See compile-time option LUA_COMPAT_GETN in luaconf.h.) +
  • + +
  • +Function loadlib was renamed package.loadlib. +(See compile-time option LUA_COMPAT_LOADLIB in luaconf.h.) +
  • + +
  • +Function math.mod was renamed math.fmod. +(See compile-time option LUA_COMPAT_MOD in luaconf.h.) +
  • + +
  • +Functions table.foreach and table.foreachi are deprecated. +You can use a for loop with pairs or ipairs instead. +
  • + +
  • +There were substantial changes in function require due to +the new module system. +However, the new behavior is mostly compatible with the old, +but require gets the path from package.path instead +of from LUA_PATH. +
  • + +
  • +Function collectgarbage has different arguments. +Function gcinfo is deprecated; +use collectgarbage("count") instead. +
  • + +
+ + + + +

7.3 - Changes in the API

+
    + +
  • +The luaopen_* functions (to open libraries) +cannot be called directly, +like a regular C function. +They must be called through Lua, +like a Lua function. +
  • + +
  • +Function lua_open was replaced by lua_newstate to +allow the user to set a memory-allocation function. +You can use luaL_newstate from the standard library to +create a state with a standard allocation function +(based on realloc). +
  • + +
  • +Functions luaL_getn and luaL_setn +(from the auxiliary library) are deprecated. +Use lua_objlen instead of luaL_getn +and nothing instead of luaL_setn. +
  • + +
  • +Function luaL_openlib was replaced by luaL_register. +
  • + +
  • +Function luaL_checkudata now throws an error when the given value +is not a userdata of the expected type. +(In Lua 5.0 it returned NULL.) +
  • + +
+ + + + +

8 - The Complete Syntax of Lua

+ +

+Here is the complete syntax of Lua in extended BNF. +(It does not describe operator precedences.) + + + + +

+
+	chunk ::= {stat [`;´]} [laststat [`;´]]
+
+	block ::= chunk
+
+	stat ::=  varlist `=´ explist | 
+		 functioncall | 
+		 do block end | 
+		 while exp do block end | 
+		 repeat block until exp | 
+		 if exp then block {elseif exp then block} [else block] end | 
+		 for Name `=´ exp `,´ exp [`,´ exp] do block end | 
+		 for namelist in explist do block end | 
+		 function funcname funcbody | 
+		 local function Name funcbody | 
+		 local namelist [`=´ explist] 
+
+	laststat ::= return [explist] | break
+
+	funcname ::= Name {`.´ Name} [`:´ Name]
+
+	varlist ::= var {`,´ var}
+
+	var ::=  Name | prefixexp `[´ exp `]´ | prefixexp `.´ Name 
+
+	namelist ::= Name {`,´ Name}
+
+	explist ::= {exp `,´} exp
+
+	exp ::=  nil | false | true | Number | String | `...´ | function | 
+		 prefixexp | tableconstructor | exp binop exp | unop exp 
+
+	prefixexp ::= var | functioncall | `(´ exp `)´
+
+	functioncall ::=  prefixexp args | prefixexp `:´ Name args 
+
+	args ::=  `(´ [explist] `)´ | tableconstructor | String 
+
+	function ::= function funcbody
+
+	funcbody ::= `(´ [parlist] `)´ block end
+
+	parlist ::= namelist [`,´ `...´] | `...´
+
+	tableconstructor ::= `{´ [fieldlist] `}´
+
+	fieldlist ::= field {fieldsep field} [fieldsep]
+
+	field ::= `[´ exp `]´ `=´ exp | Name `=´ exp | exp
+
+	fieldsep ::= `,´ | `;´
+
+	binop ::= `+´ | `-´ | `*´ | `/´ | `^´ | `%´ | `..´ | 
+		 `<´ | `<=´ | `>´ | `>=´ | `==´ | `~=´ | 
+		 and | or
+
+	unop ::= `-´ | not | `#´
+
+
+ +

+ + + + + + + +


+ +Last update: +Mon Feb 13 18:54:19 BRST 2012 + + + + + diff --git a/deps/lua/doc/readme.html b/deps/lua/doc/readme.html new file mode 100644 index 0000000..3ed6a81 --- /dev/null +++ b/deps/lua/doc/readme.html @@ -0,0 +1,40 @@ + + +Lua documentation + + + + + +
+

+Lua +Documentation +

+ +This is the documentation included in the source distribution of Lua 5.1.5. + + + +Lua's +official web site +contains updated documentation, +especially the +reference manual. +

+ +


+ +Last update: +Fri Feb 3 09:44:42 BRST 2012 + + + + diff --git a/deps/lua/etc/Makefile b/deps/lua/etc/Makefile new file mode 100644 index 0000000..6d00008 --- /dev/null +++ b/deps/lua/etc/Makefile @@ -0,0 +1,44 @@ +# makefile for Lua etc + +TOP= .. +LIB= $(TOP)/src +INC= $(TOP)/src +BIN= $(TOP)/src +SRC= $(TOP)/src +TST= $(TOP)/test + +CC= gcc +CFLAGS= -O2 -Wall -I$(INC) $(MYCFLAGS) +MYCFLAGS= +MYLDFLAGS= -Wl,-E +MYLIBS= -lm +#MYLIBS= -lm -Wl,-E -ldl -lreadline -lhistory -lncurses +RM= rm -f + +default: + @echo 'Please choose a target: min noparser one strict clean' + +min: min.c + $(CC) $(CFLAGS) $@.c -L$(LIB) -llua $(MYLIBS) + echo 'print"Hello there!"' | ./a.out + +noparser: noparser.o + $(CC) noparser.o $(SRC)/lua.o -L$(LIB) -llua $(MYLIBS) + $(BIN)/luac $(TST)/hello.lua + -./a.out luac.out + -./a.out -e'a=1' + +one: + $(CC) $(CFLAGS) all.c $(MYLIBS) + ./a.out $(TST)/hello.lua + +strict: + -$(BIN)/lua -e 'print(a);b=2' + -$(BIN)/lua -lstrict -e 'print(a)' + -$(BIN)/lua -e 'function f() b=2 end f()' + -$(BIN)/lua -lstrict -e 'function f() b=2 end f()' + +clean: + $(RM) a.out core core.* *.o luac.out + +.PHONY: default min noparser one strict clean diff --git a/deps/lua/etc/README b/deps/lua/etc/README new file mode 100644 index 0000000..5149fc9 --- /dev/null +++ b/deps/lua/etc/README @@ -0,0 +1,37 @@ +This directory contains some useful files and code. +Unlike the code in ../src, everything here is in the public domain. + +If any of the makes fail, you're probably not using the same libraries +used to build Lua. Set MYLIBS in Makefile accordingly. + +all.c + Full Lua interpreter in a single file. + Do "make one" for a demo. + +lua.hpp + Lua header files for C++ using 'extern "C"'. + +lua.ico + A Lua icon for Windows (and web sites: save as favicon.ico). + Drawn by hand by Markus Gritsch . + +lua.pc + pkg-config data for Lua + +luavs.bat + Script to build Lua under "Visual Studio .NET Command Prompt". + Run it from the toplevel as etc\luavs.bat. + +min.c + A minimal Lua interpreter. + Good for learning and for starting your own. + Do "make min" for a demo. + +noparser.c + Linking with noparser.o avoids loading the parsing modules in lualib.a. + Do "make noparser" for a demo. + +strict.lua + Traps uses of undeclared global variables. + Do "make strict" for a demo. 
+ diff --git a/deps/lua/etc/all.c b/deps/lua/etc/all.c new file mode 100644 index 0000000..dab68fa --- /dev/null +++ b/deps/lua/etc/all.c @@ -0,0 +1,38 @@ +/* +* all.c -- Lua core, libraries and interpreter in a single file +*/ + +#define luaall_c + +#include "lapi.c" +#include "lcode.c" +#include "ldebug.c" +#include "ldo.c" +#include "ldump.c" +#include "lfunc.c" +#include "lgc.c" +#include "llex.c" +#include "lmem.c" +#include "lobject.c" +#include "lopcodes.c" +#include "lparser.c" +#include "lstate.c" +#include "lstring.c" +#include "ltable.c" +#include "ltm.c" +#include "lundump.c" +#include "lvm.c" +#include "lzio.c" + +#include "lauxlib.c" +#include "lbaselib.c" +#include "ldblib.c" +#include "liolib.c" +#include "linit.c" +#include "lmathlib.c" +#include "loadlib.c" +#include "loslib.c" +#include "lstrlib.c" +#include "ltablib.c" + +#include "lua.c" diff --git a/deps/lua/etc/lua.hpp b/deps/lua/etc/lua.hpp new file mode 100644 index 0000000..ec417f5 --- /dev/null +++ b/deps/lua/etc/lua.hpp @@ -0,0 +1,9 @@ +// lua.hpp +// Lua header files for C++ +// <> not supplied automatically because Lua also compiles as C++ + +extern "C" { +#include "lua.h" +#include "lualib.h" +#include "lauxlib.h" +} diff --git a/deps/lua/etc/lua.ico b/deps/lua/etc/lua.ico new file mode 100644 index 0000000..ccbabc4 Binary files /dev/null and b/deps/lua/etc/lua.ico differ diff --git a/deps/lua/etc/lua.pc b/deps/lua/etc/lua.pc new file mode 100644 index 0000000..07e2852 --- /dev/null +++ b/deps/lua/etc/lua.pc @@ -0,0 +1,31 @@ +# lua.pc -- pkg-config data for Lua + +# vars from install Makefile + +# grep '^V=' ../Makefile +V= 5.1 +# grep '^R=' ../Makefile +R= 5.1.5 + +# grep '^INSTALL_.*=' ../Makefile | sed 's/INSTALL_TOP/prefix/' +prefix= /usr/local +INSTALL_BIN= ${prefix}/bin +INSTALL_INC= ${prefix}/include +INSTALL_LIB= ${prefix}/lib +INSTALL_MAN= ${prefix}/man/man1 +INSTALL_LMOD= ${prefix}/share/lua/${V} +INSTALL_CMOD= ${prefix}/lib/lua/${V} + +# canonical vars +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include + +Name: Lua +Description: An Extensible Extension Language +Version: ${R} +Requires: +Libs: -L${libdir} -llua -lm +Cflags: -I${includedir} + +# (end of lua.pc) diff --git a/deps/lua/etc/luavs.bat b/deps/lua/etc/luavs.bat new file mode 100644 index 0000000..08c2bed --- /dev/null +++ b/deps/lua/etc/luavs.bat @@ -0,0 +1,28 @@ +@rem Script to build Lua under "Visual Studio .NET Command Prompt". +@rem Do not run from this directory; run it from the toplevel: etc\luavs.bat . +@rem It creates lua51.dll, lua51.lib, lua.exe, and luac.exe in src. +@rem (contributed by David Manura and Mike Pall) + +@setlocal +@set MYCOMPILE=cl /nologo /MD /O2 /W3 /c /D_CRT_SECURE_NO_DEPRECATE +@set MYLINK=link /nologo +@set MYMT=mt /nologo + +cd src +%MYCOMPILE% /DLUA_BUILD_AS_DLL l*.c +del lua.obj luac.obj +%MYLINK% /DLL /out:lua51.dll l*.obj +if exist lua51.dll.manifest^ + %MYMT% -manifest lua51.dll.manifest -outputresource:lua51.dll;2 +%MYCOMPILE% /DLUA_BUILD_AS_DLL lua.c +%MYLINK% /out:lua.exe lua.obj lua51.lib +if exist lua.exe.manifest^ + %MYMT% -manifest lua.exe.manifest -outputresource:lua.exe +%MYCOMPILE% l*.c print.c +del lua.obj linit.obj lbaselib.obj ldblib.obj liolib.obj lmathlib.obj^ + loslib.obj ltablib.obj lstrlib.obj loadlib.obj +%MYLINK% /out:luac.exe *.obj +if exist luac.exe.manifest^ + %MYMT% -manifest luac.exe.manifest -outputresource:luac.exe +del *.obj *.manifest +cd .. 
diff --git a/deps/lua/etc/min.c b/deps/lua/etc/min.c new file mode 100644 index 0000000..6a85a4d --- /dev/null +++ b/deps/lua/etc/min.c @@ -0,0 +1,39 @@ +/* +* min.c -- a minimal Lua interpreter +* loads stdin only with minimal error handling. +* no interaction, and no standard library, only a "print" function. +*/ + +#include + +#include "lua.h" +#include "lauxlib.h" + +static int print(lua_State *L) +{ + int n=lua_gettop(L); + int i; + for (i=1; i<=n; i++) + { + if (i>1) printf("\t"); + if (lua_isstring(L,i)) + printf("%s",lua_tostring(L,i)); + else if (lua_isnil(L,i)) + printf("%s","nil"); + else if (lua_isboolean(L,i)) + printf("%s",lua_toboolean(L,i) ? "true" : "false"); + else + printf("%s:%p",luaL_typename(L,i),lua_topointer(L,i)); + } + printf("\n"); + return 0; +} + +int main(void) +{ + lua_State *L=lua_open(); + lua_register(L,"print",print); + if (luaL_dofile(L,NULL)!=0) fprintf(stderr,"%s\n",lua_tostring(L,-1)); + lua_close(L); + return 0; +} diff --git a/deps/lua/etc/noparser.c b/deps/lua/etc/noparser.c new file mode 100644 index 0000000..13ba546 --- /dev/null +++ b/deps/lua/etc/noparser.c @@ -0,0 +1,50 @@ +/* +* The code below can be used to make a Lua core that does not contain the +* parsing modules (lcode, llex, lparser), which represent 35% of the total core. +* You'll only be able to load binary files and strings, precompiled with luac. +* (Of course, you'll have to build luac with the original parsing modules!) +* +* To use this module, simply compile it ("make noparser" does that) and list +* its object file before the Lua libraries. The linker should then not load +* the parsing modules. To try it, do "make luab". +* +* If you also want to avoid the dump module (ldump.o), define NODUMP. +* #define NODUMP +*/ + +#define LUA_CORE + +#include "llex.h" +#include "lparser.h" +#include "lzio.h" + +LUAI_FUNC void luaX_init (lua_State *L) { + UNUSED(L); +} + +LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) { + UNUSED(z); + UNUSED(buff); + UNUSED(name); + lua_pushliteral(L,"parser not loaded"); + lua_error(L); + return NULL; +} + +#ifdef NODUMP +#include "lundump.h" + +LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip) { + UNUSED(f); + UNUSED(w); + UNUSED(data); + UNUSED(strip); +#if 1 + UNUSED(L); + return 0; +#else + lua_pushliteral(L,"dumper not loaded"); + lua_error(L); +#endif +} +#endif diff --git a/deps/lua/etc/strict.lua b/deps/lua/etc/strict.lua new file mode 100644 index 0000000..604619d --- /dev/null +++ b/deps/lua/etc/strict.lua @@ -0,0 +1,41 @@ +-- +-- strict.lua +-- checks uses of undeclared global variables +-- All global variables must be 'declared' through a regular assignment +-- (even assigning nil will do) in a main chunk before being used +-- anywhere or assigned to inside a function. 
+-- + +local getinfo, error, rawset, rawget = debug.getinfo, error, rawset, rawget + +local mt = getmetatable(_G) +if mt == nil then + mt = {} + setmetatable(_G, mt) +end + +mt.__declared = {} + +local function what () + local d = getinfo(3, "S") + return d and d.what or "C" +end + +mt.__newindex = function (t, n, v) + if not mt.__declared[n] then + local w = what() + if w ~= "main" and w ~= "C" then + error("assign to undeclared variable '"..n.."'", 2) + end + mt.__declared[n] = true + end + rawset(t, n, v) +end + +mt.__index = function (t, n) + if not mt.__declared[n] and what() ~= "C" then + error("variable '"..n.."' is not declared", 2) + end + return rawget(t, n) +end + diff --git a/deps/lua/src/Makefile b/deps/lua/src/Makefile new file mode 100644 index 0000000..f3bba2f --- /dev/null +++ b/deps/lua/src/Makefile @@ -0,0 +1,183 @@ +# makefile for building Lua +# see ../INSTALL for installation instructions +# see ../Makefile and luaconf.h for further customization + +# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT ======================= + +# Your platform. See PLATS for possible values. +PLAT= none + +CC?= gcc +CFLAGS= -O2 -Wall $(MYCFLAGS) +AR= ar rcu +RANLIB= ranlib +RM= rm -f +LIBS= -lm $(MYLIBS) + +MYCFLAGS= +MYLDFLAGS= +MYLIBS= + +# == END OF USER SETTINGS. NO NEED TO CHANGE ANYTHING BELOW THIS LINE ========= + +PLATS= aix ansi bsd freebsd generic linux macosx mingw posix solaris + +LUA_A= liblua.a +CORE_O= lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o \ + lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o \ + lundump.o lvm.o lzio.o strbuf.o fpconv.o +LIB_O= lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o \ + lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o \ + lua_bit.o + +LUA_T= lua +LUA_O= lua.o + +LUAC_T= luac +LUAC_O= luac.o print.o + +ALL_O= $(CORE_O) $(LIB_O) $(LUA_O) $(LUAC_O) +ALL_T= $(LUA_A) $(LUA_T) $(LUAC_T) +ALL_A= $(LUA_A) + +default: $(PLAT) + +all: $(ALL_T) + +o: $(ALL_O) + +a: $(ALL_A) + +$(LUA_A): $(CORE_O) $(LIB_O) + $(AR) $@ $(CORE_O) $(LIB_O) # DLL needs all object files + $(RANLIB) $@ + +$(LUA_T): $(LUA_O) $(LUA_A) + $(CC) -o $@ $(MYLDFLAGS) $(LUA_O) $(LUA_A) $(LIBS) + +$(LUAC_T): $(LUAC_O) $(LUA_A) + $(CC) -o $@ $(MYLDFLAGS) $(LUAC_O) $(LUA_A) $(LIBS) + +clean: + $(RM) $(ALL_T) $(ALL_O) + +depend: + @$(CC) $(CFLAGS) -MM l*.c print.c + +echo: + @echo "PLAT = $(PLAT)" + @echo "CC = $(CC)" + @echo "CFLAGS = $(CFLAGS)" + @echo "AR = $(AR)" + @echo "RANLIB = $(RANLIB)" + @echo "RM = $(RM)" + @echo "MYCFLAGS = $(MYCFLAGS)" + @echo "MYLDFLAGS = $(MYLDFLAGS)" + @echo "MYLIBS = $(MYLIBS)" + +# convenience targets for popular platforms + +none: + @echo "Please choose a platform:" + @echo " $(PLATS)" + +aix: + $(MAKE) all CC="xlc" CFLAGS="-O2 -DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-ldl" MYLDFLAGS="-brtl -bexpall" + +ansi: + $(MAKE) all MYCFLAGS=-DLUA_ANSI + +bsd: + $(MAKE) all MYCFLAGS="-DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-Wl,-E" + +freebsd: + $(MAKE) all MYCFLAGS="-DLUA_USE_LINUX" MYLIBS="-Wl,-E -lreadline" + +generic: + $(MAKE) all MYCFLAGS= + +linux: + $(MAKE) all MYCFLAGS=-DLUA_USE_LINUX MYLIBS="-Wl,-E -ldl -lreadline -lhistory -lncurses" + +macosx: + $(MAKE) all MYCFLAGS=-DLUA_USE_LINUX MYLIBS="-lreadline" +# use this on Mac OS X 10.3- +# $(MAKE) all MYCFLAGS=-DLUA_USE_MACOSX + +mingw: + $(MAKE) "LUA_A=lua51.dll" "LUA_T=lua.exe" \ + "AR=$(CC) -shared -o" "RANLIB=strip --strip-unneeded" \ + "MYCFLAGS=-DLUA_BUILD_AS_DLL" "MYLIBS=" "MYLDFLAGS=-s" lua.exe + $(MAKE) 
"LUAC_T=luac.exe" luac.exe + +posix: + $(MAKE) all MYCFLAGS=-DLUA_USE_POSIX + +solaris: + $(MAKE) all MYCFLAGS="-DLUA_USE_POSIX -DLUA_USE_DLOPEN" MYLIBS="-ldl" + +# list targets that do not create files (but not all makes understand .PHONY) +.PHONY: all $(PLATS) default o a clean depend echo none + +# DO NOT DELETE + +lapi.o: lapi.c lua.h luaconf.h lapi.h lobject.h llimits.h ldebug.h \ + lstate.h ltm.h lzio.h lmem.h ldo.h lfunc.h lgc.h lstring.h ltable.h \ + lundump.h lvm.h +lauxlib.o: lauxlib.c lua.h luaconf.h lauxlib.h +lbaselib.o: lbaselib.c lua.h luaconf.h lauxlib.h lualib.h +lcode.o: lcode.c lua.h luaconf.h lcode.h llex.h lobject.h llimits.h \ + lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h lgc.h \ + ltable.h +ldblib.o: ldblib.c lua.h luaconf.h lauxlib.h lualib.h +ldebug.o: ldebug.c lua.h luaconf.h lapi.h lobject.h llimits.h lcode.h \ + llex.h lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h \ + lfunc.h lstring.h lgc.h ltable.h lvm.h +ldo.o: ldo.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \ + lzio.h lmem.h ldo.h lfunc.h lgc.h lopcodes.h lparser.h lstring.h \ + ltable.h lundump.h lvm.h +ldump.o: ldump.c lua.h luaconf.h lobject.h llimits.h lstate.h ltm.h \ + lzio.h lmem.h lundump.h +lfunc.o: lfunc.c lua.h luaconf.h lfunc.h lobject.h llimits.h lgc.h lmem.h \ + lstate.h ltm.h lzio.h +lgc.o: lgc.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \ + lzio.h lmem.h ldo.h lfunc.h lgc.h lstring.h ltable.h +linit.o: linit.c lua.h luaconf.h lualib.h lauxlib.h +liolib.o: liolib.c lua.h luaconf.h lauxlib.h lualib.h +llex.o: llex.c lua.h luaconf.h ldo.h lobject.h llimits.h lstate.h ltm.h \ + lzio.h lmem.h llex.h lparser.h lstring.h lgc.h ltable.h +lmathlib.o: lmathlib.c lua.h luaconf.h lauxlib.h lualib.h +lmem.o: lmem.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \ + ltm.h lzio.h lmem.h ldo.h +loadlib.o: loadlib.c lua.h luaconf.h lauxlib.h lualib.h +lobject.o: lobject.c lua.h luaconf.h ldo.h lobject.h llimits.h lstate.h \ + ltm.h lzio.h lmem.h lstring.h lgc.h lvm.h +lopcodes.o: lopcodes.c lopcodes.h llimits.h lua.h luaconf.h +loslib.o: loslib.c lua.h luaconf.h lauxlib.h lualib.h +lparser.o: lparser.c lua.h luaconf.h lcode.h llex.h lobject.h llimits.h \ + lzio.h lmem.h lopcodes.h lparser.h ldebug.h lstate.h ltm.h ldo.h \ + lfunc.h lstring.h lgc.h ltable.h +lstate.o: lstate.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \ + ltm.h lzio.h lmem.h ldo.h lfunc.h lgc.h llex.h lstring.h ltable.h +lstring.o: lstring.c lua.h luaconf.h lmem.h llimits.h lobject.h lstate.h \ + ltm.h lzio.h lstring.h lgc.h +lstrlib.o: lstrlib.c lua.h luaconf.h lauxlib.h lualib.h +ltable.o: ltable.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h \ + ltm.h lzio.h lmem.h ldo.h lgc.h ltable.h +ltablib.o: ltablib.c lua.h luaconf.h lauxlib.h lualib.h +ltm.o: ltm.c lua.h luaconf.h lobject.h llimits.h lstate.h ltm.h lzio.h \ + lmem.h lstring.h lgc.h ltable.h +lua.o: lua.c lua.h luaconf.h lauxlib.h lualib.h +luac.o: luac.c lua.h luaconf.h lauxlib.h ldo.h lobject.h llimits.h \ + lstate.h ltm.h lzio.h lmem.h lfunc.h lopcodes.h lstring.h lgc.h \ + lundump.h +lundump.o: lundump.c lua.h luaconf.h ldebug.h lstate.h lobject.h \ + llimits.h ltm.h lzio.h lmem.h ldo.h lfunc.h lstring.h lgc.h lundump.h +lvm.o: lvm.c lua.h luaconf.h ldebug.h lstate.h lobject.h llimits.h ltm.h \ + lzio.h lmem.h ldo.h lfunc.h lgc.h lopcodes.h lstring.h ltable.h lvm.h +lzio.o: lzio.c lua.h luaconf.h llimits.h lmem.h lstate.h lobject.h ltm.h \ + lzio.h +print.o: print.c ldebug.h 
lstate.h lua.h luaconf.h lobject.h llimits.h \ + ltm.h lzio.h lmem.h lopcodes.h lundump.h + +# (end of Makefile) diff --git a/deps/lua/src/fpconv.c b/deps/lua/src/fpconv.c new file mode 100644 index 0000000..7990831 --- /dev/null +++ b/deps/lua/src/fpconv.c @@ -0,0 +1,205 @@ +/* fpconv - Floating point conversion routines + * + * Copyright (c) 2011-2012 Mark Pulford + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* JSON uses a '.' decimal separator. strtod() / sprintf() under C libraries + * with locale support will break when the decimal separator is a comma. + * + * fpconv_* will around these issues with a translation buffer if required. + */ + +#include +#include +#include +#include + +#include "fpconv.h" + +/* Lua CJSON assumes the locale is the same for all threads within a + * process and doesn't change after initialisation. + * + * This avoids the need for per thread storage or expensive checks + * for call. */ +static char locale_decimal_point = '.'; + +/* In theory multibyte decimal_points are possible, but + * Lua CJSON only supports UTF-8 and known locales only have + * single byte decimal points ([.,]). + * + * localconv() may not be thread safe (=>crash), and nl_langinfo() is + * not supported on some platforms. Use sprintf() instead - if the + * locale does change, at least Lua CJSON won't crash. */ +static void fpconv_update_locale() +{ + char buf[8]; + + snprintf(buf, sizeof(buf), "%g", 0.5); + + /* Failing this test might imply the platform has a buggy dtoa + * implementation or wide characters */ + if (buf[0] != '0' || buf[2] != '5' || buf[3] != 0) { + fprintf(stderr, "Error: wide characters found or printf() bug."); + abort(); + } + + locale_decimal_point = buf[1]; +} + +/* Check for a valid number character: [-+0-9a-yA-Y.] + * Eg: -0.6e+5, infinity, 0xF0.F0pF0 + * + * Used to find the probable end of a number. It doesn't matter if + * invalid characters are counted - strtod() will find the valid + * number if it exists. The risk is that slightly more memory might + * be allocated before a parse error occurs. */ +static inline int valid_number_character(char ch) +{ + char lower_ch; + + if ('0' <= ch && ch <= '9') + return 1; + if (ch == '-' || ch == '+' || ch == '.') + return 1; + + /* Hex digits, exponent (e), base (p), "infinity",.. */ + lower_ch = ch | 0x20; + if ('a' <= lower_ch && lower_ch <= 'y') + return 1; + + return 0; +} + +/* Calculate the size of the buffer required for a strtod locale + * conversion. 
*/ +static int strtod_buffer_size(const char *s) +{ + const char *p = s; + + while (valid_number_character(*p)) + p++; + + return p - s; +} + +/* Similar to strtod(), but must be passed the current locale's decimal point + * character. Guaranteed to be called at the start of any valid number in a string */ +double fpconv_strtod(const char *nptr, char **endptr) +{ + char localbuf[FPCONV_G_FMT_BUFSIZE]; + char *buf, *endbuf, *dp; + int buflen; + double value; + + /* System strtod() is fine when decimal point is '.' */ + if (locale_decimal_point == '.') + return strtod(nptr, endptr); + + buflen = strtod_buffer_size(nptr); + if (!buflen) { + /* No valid characters found, standard strtod() return */ + *endptr = (char *)nptr; + return 0; + } + + /* Duplicate number into buffer */ + if (buflen >= FPCONV_G_FMT_BUFSIZE) { + /* Handle unusually large numbers */ + buf = malloc(buflen + 1); + if (!buf) { + fprintf(stderr, "Out of memory"); + abort(); + } + } else { + /* This is the common case.. */ + buf = localbuf; + } + memcpy(buf, nptr, buflen); + buf[buflen] = 0; + + /* Update decimal point character if found */ + dp = strchr(buf, '.'); + if (dp) + *dp = locale_decimal_point; + + value = strtod(buf, &endbuf); + *endptr = (char *)&nptr[endbuf - buf]; + if (buflen >= FPCONV_G_FMT_BUFSIZE) + free(buf); + + return value; +} + +/* "fmt" must point to a buffer of at least 6 characters */ +static void set_number_format(char *fmt, int precision) +{ + int d1, d2, i; + + assert(1 <= precision && precision <= 14); + + /* Create printf format (%.14g) from precision */ + d1 = precision / 10; + d2 = precision % 10; + fmt[0] = '%'; + fmt[1] = '.'; + i = 2; + if (d1) { + fmt[i++] = '0' + d1; + } + fmt[i++] = '0' + d2; + fmt[i++] = 'g'; + fmt[i] = 0; +} + +/* Assumes there is always at least 32 characters available in the target buffer */ +int fpconv_g_fmt(char *str, double num, int precision) +{ + char buf[FPCONV_G_FMT_BUFSIZE]; + char fmt[6]; + int len; + char *b; + + set_number_format(fmt, precision); + + /* Pass through when decimal point character is dot. */ + if (locale_decimal_point == '.') + return snprintf(str, FPCONV_G_FMT_BUFSIZE, fmt, num); + + /* snprintf() to a buffer then translate for other decimal point characters */ + len = snprintf(buf, FPCONV_G_FMT_BUFSIZE, fmt, num); + + /* Copy into target location. Translate decimal point if required */ + b = buf; + do { + *str++ = (*b == locale_decimal_point ? '.' : *b); + } while(*b++); + + return len; +} + +void fpconv_init() +{ + fpconv_update_locale(); +} + +/* vi:ai et sw=4 ts=4: + */ diff --git a/deps/lua/src/fpconv.h b/deps/lua/src/fpconv.h new file mode 100644 index 0000000..7b0d0ee --- /dev/null +++ b/deps/lua/src/fpconv.h @@ -0,0 +1,22 @@ +/* Lua CJSON floating point conversion routines */ + +/* Buffer required to store the largest string representation of a double. 
+ * + * Longest double printed with %.14g is 21 characters long: + * -1.7976931348623e+308 */ +# define FPCONV_G_FMT_BUFSIZE 32 + +#ifdef USE_INTERNAL_FPCONV +static inline void fpconv_init() +{ + /* Do nothing - not required */ +} +#else +extern void fpconv_init(); +#endif + +extern int fpconv_g_fmt(char*, double, int); +extern double fpconv_strtod(const char*, char**); + +/* vi:ai et sw=4 ts=4: + */ diff --git a/deps/lua/src/lapi.c b/deps/lua/src/lapi.c new file mode 100644 index 0000000..e8ef41e --- /dev/null +++ b/deps/lua/src/lapi.c @@ -0,0 +1,1109 @@ +/* +** $Id: lapi.c,v 2.55.1.5 2008/07/04 18:41:18 roberto Exp $ +** Lua API +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define lapi_c +#define LUA_CORE + +#include "lua.h" + +#include "lapi.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lundump.h" +#include "lvm.h" + + + +const char lua_ident[] = + "$Lua: " LUA_RELEASE " " LUA_COPYRIGHT " $\n" + "$Authors: " LUA_AUTHORS " $\n" + "$URL: www.lua.org $\n"; + + + +#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) + +#define api_checkvalidindex(L, i) api_check(L, (i) != luaO_nilobject) + +#define api_incr_top(L) {api_check(L, L->top < L->ci->top); L->top++;} + + + +static TValue *index2adr (lua_State *L, int idx) { + if (idx > 0) { + TValue *o = L->base + (idx - 1); + api_check(L, idx <= L->ci->top - L->base); + if (o >= L->top) return cast(TValue *, luaO_nilobject); + else return o; + } + else if (idx > LUA_REGISTRYINDEX) { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } + else switch (idx) { /* pseudo-indices */ + case LUA_REGISTRYINDEX: return registry(L); + case LUA_ENVIRONINDEX: { + Closure *func = curr_func(L); + sethvalue(L, &L->env, func->c.env); + return &L->env; + } + case LUA_GLOBALSINDEX: return gt(L); + default: { + Closure *func = curr_func(L); + idx = LUA_GLOBALSINDEX - idx; + return (idx <= func->c.nupvalues) + ? &func->c.upvalue[idx-1] + : cast(TValue *, luaO_nilobject); + } + } +} + + +static Table *getcurrenv (lua_State *L) { + if (L->ci == L->base_ci) /* no enclosing function? 
*/ + return hvalue(gt(L)); /* use global table as environment */ + else { + Closure *func = curr_func(L); + return func->c.env; + } +} + + +void luaA_pushobject (lua_State *L, const TValue *o) { + setobj2s(L, L->top, o); + api_incr_top(L); +} + + +LUA_API int lua_checkstack (lua_State *L, int size) { + int res = 1; + lua_lock(L); + if (size > LUAI_MAXCSTACK || (L->top - L->base + size) > LUAI_MAXCSTACK) + res = 0; /* stack overflow */ + else if (size > 0) { + luaD_checkstack(L, size); + if (L->ci->top < L->top + size) + L->ci->top = L->top + size; + } + lua_unlock(L); + return res; +} + + +LUA_API void lua_xmove (lua_State *from, lua_State *to, int n) { + int i; + if (from == to) return; + lua_lock(to); + api_checknelems(from, n); + api_check(from, G(from) == G(to)); + api_check(from, to->ci->top - to->top >= n); + from->top -= n; + for (i = 0; i < n; i++) { + setobj2s(to, to->top++, from->top + i); + } + lua_unlock(to); +} + + +LUA_API void lua_setlevel (lua_State *from, lua_State *to) { + to->nCcalls = from->nCcalls; +} + + +LUA_API lua_CFunction lua_atpanic (lua_State *L, lua_CFunction panicf) { + lua_CFunction old; + lua_lock(L); + old = G(L)->panic; + G(L)->panic = panicf; + lua_unlock(L); + return old; +} + + +LUA_API lua_State *lua_newthread (lua_State *L) { + lua_State *L1; + lua_lock(L); + luaC_checkGC(L); + L1 = luaE_newthread(L); + setthvalue(L, L->top, L1); + api_incr_top(L); + lua_unlock(L); + luai_userstatethread(L, L1); + return L1; +} + + + +/* +** basic stack manipulation +*/ + + +LUA_API int lua_gettop (lua_State *L) { + return cast_int(L->top - L->base); +} + + +LUA_API void lua_settop (lua_State *L, int idx) { + lua_lock(L); + if (idx >= 0) { + api_check(L, idx <= L->stack_last - L->base); + while (L->top < L->base + idx) + setnilvalue(L->top++); + L->top = L->base + idx; + } + else { + api_check(L, -(idx+1) <= (L->top - L->base)); + L->top += idx+1; /* `subtract' index (index is negative) */ + } + lua_unlock(L); +} + + +LUA_API void lua_remove (lua_State *L, int idx) { + StkId p; + lua_lock(L); + p = index2adr(L, idx); + api_checkvalidindex(L, p); + while (++p < L->top) setobjs2s(L, p-1, p); + L->top--; + lua_unlock(L); +} + + +LUA_API void lua_insert (lua_State *L, int idx) { + StkId p; + StkId q; + lua_lock(L); + p = index2adr(L, idx); + api_checkvalidindex(L, p); + for (q = L->top; q>p; q--) setobjs2s(L, q, q-1); + setobjs2s(L, p, L->top); + lua_unlock(L); +} + + +LUA_API void lua_replace (lua_State *L, int idx) { + StkId o; + lua_lock(L); + /* explicit test for incompatible code */ + if (idx == LUA_ENVIRONINDEX && L->ci == L->base_ci) + luaG_runerror(L, "no calling environment"); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + if (idx == LUA_ENVIRONINDEX) { + Closure *func = curr_func(L); + api_check(L, ttistable(L->top - 1)); + func->c.env = hvalue(L->top - 1); + luaC_barrier(L, func, L->top - 1); + } + else { + setobj(L, o, L->top - 1); + if (idx < LUA_GLOBALSINDEX) /* function upvalue? */ + luaC_barrier(L, curr_func(L), L->top - 1); + } + L->top--; + lua_unlock(L); +} + + +LUA_API void lua_pushvalue (lua_State *L, int idx) { + lua_lock(L); + setobj2s(L, L->top, index2adr(L, idx)); + api_incr_top(L); + lua_unlock(L); +} + + + +/* +** access functions (stack -> C) +*/ + + +LUA_API int lua_type (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (o == luaO_nilobject) ? LUA_TNONE : ttype(o); +} + + +LUA_API const char *lua_typename (lua_State *L, int t) { + UNUSED(L); + return (t == LUA_TNONE) ? 
"no value" : luaT_typenames[t]; +} + + +LUA_API int lua_iscfunction (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return iscfunction(o); +} + + +LUA_API int lua_isnumber (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + return tonumber(o, &n); +} + + +LUA_API int lua_isstring (lua_State *L, int idx) { + int t = lua_type(L, idx); + return (t == LUA_TSTRING || t == LUA_TNUMBER); +} + + +LUA_API int lua_isuserdata (lua_State *L, int idx) { + const TValue *o = index2adr(L, idx); + return (ttisuserdata(o) || ttislightuserdata(o)); +} + + +LUA_API int lua_rawequal (lua_State *L, int index1, int index2) { + StkId o1 = index2adr(L, index1); + StkId o2 = index2adr(L, index2); + return (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 + : luaO_rawequalObj(o1, o2); +} + + +LUA_API int lua_equal (lua_State *L, int index1, int index2) { + StkId o1, o2; + int i; + lua_lock(L); /* may call tag method */ + o1 = index2adr(L, index1); + o2 = index2adr(L, index2); + i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 : equalobj(L, o1, o2); + lua_unlock(L); + return i; +} + + +LUA_API int lua_lessthan (lua_State *L, int index1, int index2) { + StkId o1, o2; + int i; + lua_lock(L); /* may call tag method */ + o1 = index2adr(L, index1); + o2 = index2adr(L, index2); + i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 + : luaV_lessthan(L, o1, o2); + lua_unlock(L); + return i; +} + + + +LUA_API lua_Number lua_tonumber (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + if (tonumber(o, &n)) + return nvalue(o); + else + return 0; +} + + +LUA_API lua_Integer lua_tointeger (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + if (tonumber(o, &n)) { + lua_Integer res; + lua_Number num = nvalue(o); + lua_number2integer(res, num); + return res; + } + else + return 0; +} + + +LUA_API int lua_toboolean (lua_State *L, int idx) { + const TValue *o = index2adr(L, idx); + return !l_isfalse(o); +} + + +LUA_API const char *lua_tolstring (lua_State *L, int idx, size_t *len) { + StkId o = index2adr(L, idx); + if (!ttisstring(o)) { + lua_lock(L); /* `luaV_tostring' may create a new string */ + if (!luaV_tostring(L, o)) { /* conversion failed? */ + if (len != NULL) *len = 0; + lua_unlock(L); + return NULL; + } + luaC_checkGC(L); + o = index2adr(L, idx); /* previous call may reallocate the stack */ + lua_unlock(L); + } + if (len != NULL) *len = tsvalue(o)->len; + return svalue(o); +} + + +LUA_API size_t lua_objlen (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TSTRING: return tsvalue(o)->len; + case LUA_TUSERDATA: return uvalue(o)->len; + case LUA_TTABLE: return luaH_getn(hvalue(o)); + case LUA_TNUMBER: { + size_t l; + lua_lock(L); /* `luaV_tostring' may create a new string */ + l = (luaV_tostring(L, o) ? tsvalue(o)->len : 0); + lua_unlock(L); + return l; + } + default: return 0; + } +} + + +LUA_API lua_CFunction lua_tocfunction (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (!iscfunction(o)) ? NULL : clvalue(o)->c.f; +} + + +LUA_API void *lua_touserdata (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TUSERDATA: return (rawuvalue(o) + 1); + case LUA_TLIGHTUSERDATA: return pvalue(o); + default: return NULL; + } +} + + +LUA_API lua_State *lua_tothread (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (!ttisthread(o)) ? 
NULL : thvalue(o); +} + + +LUA_API const void *lua_topointer (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TTABLE: return hvalue(o); + case LUA_TFUNCTION: return clvalue(o); + case LUA_TTHREAD: return thvalue(o); + case LUA_TUSERDATA: + case LUA_TLIGHTUSERDATA: + return lua_touserdata(L, idx); + default: return NULL; + } +} + + + +/* +** push functions (C -> stack) +*/ + + +LUA_API void lua_pushnil (lua_State *L) { + lua_lock(L); + setnilvalue(L->top); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushnumber (lua_State *L, lua_Number n) { + lua_lock(L); + setnvalue(L->top, n); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushinteger (lua_State *L, lua_Integer n) { + lua_lock(L); + setnvalue(L->top, cast_num(n)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushlstring (lua_State *L, const char *s, size_t len) { + lua_lock(L); + luaC_checkGC(L); + setsvalue2s(L, L->top, luaS_newlstr(L, s, len)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushstring (lua_State *L, const char *s) { + if (s == NULL) + lua_pushnil(L); + else + lua_pushlstring(L, s, strlen(s)); +} + + +LUA_API const char *lua_pushvfstring (lua_State *L, const char *fmt, + va_list argp) { + const char *ret; + lua_lock(L); + luaC_checkGC(L); + ret = luaO_pushvfstring(L, fmt, argp); + lua_unlock(L); + return ret; +} + + +LUA_API const char *lua_pushfstring (lua_State *L, const char *fmt, ...) { + const char *ret; + va_list argp; + lua_lock(L); + luaC_checkGC(L); + va_start(argp, fmt); + ret = luaO_pushvfstring(L, fmt, argp); + va_end(argp); + lua_unlock(L); + return ret; +} + + +LUA_API void lua_pushcclosure (lua_State *L, lua_CFunction fn, int n) { + Closure *cl; + lua_lock(L); + luaC_checkGC(L); + api_checknelems(L, n); + cl = luaF_newCclosure(L, n, getcurrenv(L)); + cl->c.f = fn; + L->top -= n; + while (n--) + setobj2n(L, &cl->c.upvalue[n], L->top+n); + setclvalue(L, L->top, cl); + lua_assert(iswhite(obj2gco(cl))); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushboolean (lua_State *L, int b) { + lua_lock(L); + setbvalue(L->top, (b != 0)); /* ensure that true is 1 */ + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushlightuserdata (lua_State *L, void *p) { + lua_lock(L); + setpvalue(L->top, p); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API int lua_pushthread (lua_State *L) { + lua_lock(L); + setthvalue(L, L->top, L); + api_incr_top(L); + lua_unlock(L); + return (G(L)->mainthread == L); +} + + + +/* +** get functions (Lua -> stack) +*/ + + +LUA_API void lua_gettable (lua_State *L, int idx) { + StkId t; + lua_lock(L); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + luaV_gettable(L, t, L->top - 1, L->top - 1); + lua_unlock(L); +} + + +LUA_API void lua_getfield (lua_State *L, int idx, const char *k) { + StkId t; + TValue key; + lua_lock(L); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + setsvalue(L, &key, luaS_new(L, k)); + luaV_gettable(L, t, &key, L->top); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_rawget (lua_State *L, int idx) { + StkId t; + lua_lock(L); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + setobj2s(L, L->top - 1, luaH_get(hvalue(t), L->top - 1)); + lua_unlock(L); +} + + +LUA_API void lua_rawgeti (lua_State *L, int idx, int n) { + StkId o; + lua_lock(L); + o = index2adr(L, idx); + api_check(L, ttistable(o)); + setobj2s(L, L->top, luaH_getnum(hvalue(o), n)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_createtable 
(lua_State *L, int narray, int nrec) { + lua_lock(L); + luaC_checkGC(L); + sethvalue(L, L->top, luaH_new(L, narray, nrec)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API int lua_getmetatable (lua_State *L, int objindex) { + const TValue *obj; + Table *mt = NULL; + int res; + lua_lock(L); + obj = index2adr(L, objindex); + switch (ttype(obj)) { + case LUA_TTABLE: + mt = hvalue(obj)->metatable; + break; + case LUA_TUSERDATA: + mt = uvalue(obj)->metatable; + break; + default: + mt = G(L)->mt[ttype(obj)]; + break; + } + if (mt == NULL) + res = 0; + else { + sethvalue(L, L->top, mt); + api_incr_top(L); + res = 1; + } + lua_unlock(L); + return res; +} + + +LUA_API void lua_getfenv (lua_State *L, int idx) { + StkId o; + lua_lock(L); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + switch (ttype(o)) { + case LUA_TFUNCTION: + sethvalue(L, L->top, clvalue(o)->c.env); + break; + case LUA_TUSERDATA: + sethvalue(L, L->top, uvalue(o)->env); + break; + case LUA_TTHREAD: + setobj2s(L, L->top, gt(thvalue(o))); + break; + default: + setnilvalue(L->top); + break; + } + api_incr_top(L); + lua_unlock(L); +} + + +/* +** set functions (stack -> Lua) +*/ + + +LUA_API void lua_settable (lua_State *L, int idx) { + StkId t; + lua_lock(L); + api_checknelems(L, 2); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + luaV_settable(L, t, L->top - 2, L->top - 1); + L->top -= 2; /* pop index and value */ + lua_unlock(L); +} + + +LUA_API void lua_setfield (lua_State *L, int idx, const char *k) { + StkId t; + TValue key; + lua_lock(L); + api_checknelems(L, 1); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + setsvalue(L, &key, luaS_new(L, k)); + luaV_settable(L, t, &key, L->top - 1); + L->top--; /* pop value */ + lua_unlock(L); +} + + +LUA_API void lua_rawset (lua_State *L, int idx) { + StkId t; + lua_lock(L); + api_checknelems(L, 2); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + if (hvalue(t)->readonly) + luaG_runerror(L, "Attempt to modify a readonly table"); + setobj2t(L, luaH_set(L, hvalue(t), L->top-2), L->top-1); + luaC_barriert(L, hvalue(t), L->top-1); + L->top -= 2; + lua_unlock(L); +} + + +LUA_API void lua_rawseti (lua_State *L, int idx, int n) { + StkId o; + lua_lock(L); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_check(L, ttistable(o)); + if (hvalue(o)->readonly) + luaG_runerror(L, "Attempt to modify a readonly table"); + setobj2t(L, luaH_setnum(L, hvalue(o), n), L->top-1); + luaC_barriert(L, hvalue(o), L->top-1); + L->top--; + lua_unlock(L); +} + + +LUA_API int lua_setmetatable (lua_State *L, int objindex) { + TValue *obj; + Table *mt; + lua_lock(L); + api_checknelems(L, 1); + obj = index2adr(L, objindex); + api_checkvalidindex(L, obj); + if (ttisnil(L->top - 1)) + mt = NULL; + else { + api_check(L, ttistable(L->top - 1)); + mt = hvalue(L->top - 1); + } + switch (ttype(obj)) { + case LUA_TTABLE: { + if (hvalue(obj)->readonly) + luaG_runerror(L, "Attempt to modify a readonly table"); + hvalue(obj)->metatable = mt; + if (mt) + luaC_objbarriert(L, hvalue(obj), mt); + break; + } + case LUA_TUSERDATA: { + uvalue(obj)->metatable = mt; + if (mt) + luaC_objbarrier(L, rawuvalue(obj), mt); + break; + } + default: { + G(L)->mt[ttype(obj)] = mt; + break; + } + } + L->top--; + lua_unlock(L); + return 1; +} + + +LUA_API int lua_setfenv (lua_State *L, int idx) { + StkId o; + int res = 1; + lua_lock(L); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + api_check(L, ttistable(L->top - 1)); + switch (ttype(o)) { + case LUA_TFUNCTION: + clvalue(o)->c.env 
= hvalue(L->top - 1); + break; + case LUA_TUSERDATA: + uvalue(o)->env = hvalue(L->top - 1); + break; + case LUA_TTHREAD: + sethvalue(L, gt(thvalue(o)), hvalue(L->top - 1)); + break; + default: + res = 0; + break; + } + if (res) luaC_objbarrier(L, gcvalue(o), hvalue(L->top - 1)); + L->top--; + lua_unlock(L); + return res; +} + + +/* +** `load' and `call' functions (run Lua code) +*/ + + +#define adjustresults(L,nres) \ + { if (nres == LUA_MULTRET && L->top >= L->ci->top) L->ci->top = L->top; } + + +#define checkresults(L,na,nr) \ + api_check(L, (nr) == LUA_MULTRET || (L->ci->top - L->top >= (nr) - (na))) + + +LUA_API void lua_call (lua_State *L, int nargs, int nresults) { + StkId func; + lua_lock(L); + api_checknelems(L, nargs+1); + checkresults(L, nargs, nresults); + func = L->top - (nargs+1); + luaD_call(L, func, nresults); + adjustresults(L, nresults); + lua_unlock(L); +} + + + +/* +** Execute a protected call. +*/ +struct CallS { /* data to `f_call' */ + StkId func; + int nresults; +}; + + +static void f_call (lua_State *L, void *ud) { + struct CallS *c = cast(struct CallS *, ud); + luaD_call(L, c->func, c->nresults); +} + + + +LUA_API int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc) { + struct CallS c; + int status; + ptrdiff_t func; + lua_lock(L); + api_checknelems(L, nargs+1); + checkresults(L, nargs, nresults); + if (errfunc == 0) + func = 0; + else { + StkId o = index2adr(L, errfunc); + api_checkvalidindex(L, o); + func = savestack(L, o); + } + c.func = L->top - (nargs+1); /* function to be called */ + c.nresults = nresults; + status = luaD_pcall(L, f_call, &c, savestack(L, c.func), func); + adjustresults(L, nresults); + lua_unlock(L); + return status; +} + + +/* +** Execute a protected C call. +*/ +struct CCallS { /* data to `f_Ccall' */ + lua_CFunction func; + void *ud; +}; + + +static void f_Ccall (lua_State *L, void *ud) { + struct CCallS *c = cast(struct CCallS *, ud); + Closure *cl; + cl = luaF_newCclosure(L, 0, getcurrenv(L)); + cl->c.f = c->func; + setclvalue(L, L->top, cl); /* push function */ + api_incr_top(L); + setpvalue(L->top, c->ud); /* push only argument */ + api_incr_top(L); + luaD_call(L, L->top - 2, 0); +} + + +LUA_API int lua_cpcall (lua_State *L, lua_CFunction func, void *ud) { + struct CCallS c; + int status; + lua_lock(L); + c.func = func; + c.ud = ud; + status = luaD_pcall(L, f_Ccall, &c, savestack(L, L->top), 0); + lua_unlock(L); + return status; +} + + +LUA_API int lua_load (lua_State *L, lua_Reader reader, void *data, + const char *chunkname) { + ZIO z; + int status; + lua_lock(L); + if (!chunkname) chunkname = "?"; + luaZ_init(L, &z, reader, data); + status = luaD_protectedparser(L, &z, chunkname); + lua_unlock(L); + return status; +} + + +LUA_API int lua_dump (lua_State *L, lua_Writer writer, void *data) { + int status; + TValue *o; + lua_lock(L); + api_checknelems(L, 1); + o = L->top - 1; + if (isLfunction(o)) + status = luaU_dump(L, clvalue(o)->l.p, writer, data, 0); + else + status = 1; + lua_unlock(L); + return status; +} + + +LUA_API int lua_status (lua_State *L) { + return L->status; +} + + +/* +** Garbage-collection function +*/ + +LUA_API int lua_gc (lua_State *L, int what, int data) { + int res = 0; + global_State *g; + lua_lock(L); + g = G(L); + switch (what) { + case LUA_GCSTOP: { + g->GCthreshold = MAX_LUMEM; + break; + } + case LUA_GCRESTART: { + g->GCthreshold = g->totalbytes; + break; + } + case LUA_GCCOLLECT: { + luaC_fullgc(L); + break; + } + case LUA_GCCOUNT: { + /* GC values are expressed in Kbytes: #bytes/2^10 */ + 
res = cast_int(g->totalbytes >> 10); + break; + } + case LUA_GCCOUNTB: { + res = cast_int(g->totalbytes & 0x3ff); + break; + } + case LUA_GCSTEP: { + lu_mem a = (cast(lu_mem, data) << 10); + if (a <= g->totalbytes) + g->GCthreshold = g->totalbytes - a; + else + g->GCthreshold = 0; + while (g->GCthreshold <= g->totalbytes) { + luaC_step(L); + if (g->gcstate == GCSpause) { /* end of cycle? */ + res = 1; /* signal it */ + break; + } + } + break; + } + case LUA_GCSETPAUSE: { + res = g->gcpause; + g->gcpause = data; + break; + } + case LUA_GCSETSTEPMUL: { + res = g->gcstepmul; + g->gcstepmul = data; + break; + } + default: res = -1; /* invalid option */ + } + lua_unlock(L); + return res; +} + + + +/* +** miscellaneous functions +*/ + + +LUA_API int lua_error (lua_State *L) { + lua_lock(L); + api_checknelems(L, 1); + luaG_errormsg(L); + lua_unlock(L); + return 0; /* to avoid warnings */ +} + + +LUA_API int lua_next (lua_State *L, int idx) { + StkId t; + int more; + lua_lock(L); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + more = luaH_next(L, hvalue(t), L->top - 1); + if (more) { + api_incr_top(L); + } + else /* no more elements */ + L->top -= 1; /* remove key */ + lua_unlock(L); + return more; +} + + +LUA_API void lua_concat (lua_State *L, int n) { + lua_lock(L); + api_checknelems(L, n); + if (n >= 2) { + luaC_checkGC(L); + luaV_concat(L, n, cast_int(L->top - L->base) - 1); + L->top -= (n-1); + } + else if (n == 0) { /* push empty string */ + setsvalue2s(L, L->top, luaS_newlstr(L, "", 0)); + api_incr_top(L); + } + /* else n == 1; nothing to do */ + lua_unlock(L); +} + + +LUA_API lua_Alloc lua_getallocf (lua_State *L, void **ud) { + lua_Alloc f; + lua_lock(L); + if (ud) *ud = G(L)->ud; + f = G(L)->frealloc; + lua_unlock(L); + return f; +} + + +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud) { + lua_lock(L); + G(L)->ud = ud; + G(L)->frealloc = f; + lua_unlock(L); +} + + +LUA_API void *lua_newuserdata (lua_State *L, size_t size) { + Udata *u; + lua_lock(L); + luaC_checkGC(L); + u = luaS_newudata(L, size, getcurrenv(L)); + setuvalue(L, L->top, u); + api_incr_top(L); + lua_unlock(L); + return u + 1; +} + + + + +static const char *aux_upvalue (StkId fi, int n, TValue **val) { + Closure *f; + if (!ttisfunction(fi)) return NULL; + f = clvalue(fi); + if (f->c.isC) { + if (!(1 <= n && n <= f->c.nupvalues)) return NULL; + *val = &f->c.upvalue[n-1]; + return ""; + } + else { + Proto *p = f->l.p; + if (!(1 <= n && n <= p->sizeupvalues)) return NULL; + *val = f->l.upvals[n-1]->v; + return getstr(p->upvalues[n-1]); + } +} + + +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n) { + const char *name; + TValue *val; + lua_lock(L); + name = aux_upvalue(index2adr(L, funcindex), n, &val); + if (name) { + setobj2s(L, L->top, val); + api_incr_top(L); + } + lua_unlock(L); + return name; +} + + +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n) { + const char *name; + TValue *val; + StkId fi; + lua_lock(L); + fi = index2adr(L, funcindex); + api_checknelems(L, 1); + name = aux_upvalue(fi, n, &val); + if (name) { + L->top--; + setobj(L, val, L->top); + luaC_barrier(L, clvalue(fi), L->top); + } + lua_unlock(L); + return name; +} + +LUA_API void lua_enablereadonlytable (lua_State *L, int objindex, int enabled) { + const TValue* o = index2adr(L, objindex); + api_check(L, ttistable(o)); + Table* t = hvalue(o); + api_check(L, t != hvalue(registry(L))); + t->readonly = enabled; +} + +LUA_API int lua_isreadonlytable (lua_State *L, int objindex) { + const 
TValue* o = index2adr(L, objindex); + api_check(L, ttistable(o)); + Table* t = hvalue(o); + api_check(L, t != hvalue(registry(L))); + return t->readonly; +} + diff --git a/deps/lua/src/lapi.h b/deps/lua/src/lapi.h new file mode 100644 index 0000000..2c3fab2 --- /dev/null +++ b/deps/lua/src/lapi.h @@ -0,0 +1,16 @@ +/* +** $Id: lapi.h,v 2.2.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions from Lua API +** See Copyright Notice in lua.h +*/ + +#ifndef lapi_h +#define lapi_h + + +#include "lobject.h" + + +LUAI_FUNC void luaA_pushobject (lua_State *L, const TValue *o); + +#endif diff --git a/deps/lua/src/lauxlib.c b/deps/lua/src/lauxlib.c new file mode 100644 index 0000000..751f1e8 --- /dev/null +++ b/deps/lua/src/lauxlib.c @@ -0,0 +1,652 @@ +/* +** $Id: lauxlib.c,v 1.159.1.3 2008/01/21 13:20:51 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#include <ctype.h> +#include <errno.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + + +/* This file uses only the official API of Lua. +** Any function declared here could be written as an application function. +*/ + +#define lauxlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" + + +#define FREELIST_REF 0 /* free list of references */ + + +/* convert a stack index to positive */ +#define abs_index(L, i) ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : \ + lua_gettop(L) + (i) + 1) + + +/* +** {====================================================== +** Error-report functions +** ======================================================= +*/ + + +LUALIB_API int luaL_argerror (lua_State *L, int narg, const char *extramsg) { + lua_Debug ar; + if (!lua_getstack(L, 0, &ar)) /* no stack frame? */ + return luaL_error(L, "bad argument #%d (%s)", narg, extramsg); + lua_getinfo(L, "n", &ar); + if (strcmp(ar.namewhat, "method") == 0) { + narg--; /* do not count `self' */ + if (narg == 0) /* error is in the self argument itself? */ + return luaL_error(L, "calling " LUA_QS " on bad self (%s)", + ar.name, extramsg); + } + if (ar.name == NULL) + ar.name = "?"; + return luaL_error(L, "bad argument #%d to " LUA_QS " (%s)", + narg, ar.name, extramsg); +} + + +LUALIB_API int luaL_typerror (lua_State *L, int narg, const char *tname) { + const char *msg = lua_pushfstring(L, "%s expected, got %s", + tname, luaL_typename(L, narg)); + return luaL_argerror(L, narg, msg); +} + + +static void tag_error (lua_State *L, int narg, int tag) { + luaL_typerror(L, narg, lua_typename(L, tag)); +} + + +LUALIB_API void luaL_where (lua_State *L, int level) { + lua_Debug ar; + if (lua_getstack(L, level, &ar)) { /* check function at level */ + lua_getinfo(L, "Sl", &ar); /* get info about it */ + if (ar.currentline > 0) { /* is there info? */ + lua_pushfstring(L, "%s:%d: ", ar.short_src, ar.currentline); + return; + } + } + lua_pushliteral(L, ""); /* else, no information available... */ +} + + +LUALIB_API int luaL_error (lua_State *L, const char *fmt, ...) { + va_list argp; + va_start(argp, fmt); + luaL_where(L, 1); + lua_pushvfstring(L, fmt, argp); + va_end(argp); + lua_concat(L, 2); + return lua_error(L); +} + +/* }====================================================== */ + + +LUALIB_API int luaL_checkoption (lua_State *L, int narg, const char *def, + const char *const lst[]) { + const char *name = (def) ?
luaL_optstring(L, narg, def) : + luaL_checkstring(L, narg); + int i; + for (i=0; lst[i]; i++) + if (strcmp(lst[i], name) == 0) + return i; + return luaL_argerror(L, narg, + lua_pushfstring(L, "invalid option " LUA_QS, name)); +} + + +LUALIB_API int luaL_newmetatable (lua_State *L, const char *tname) { + lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get registry.name */ + if (!lua_isnil(L, -1)) /* name already in use? */ + return 0; /* leave previous value on top, but return 0 */ + lua_pop(L, 1); + lua_newtable(L); /* create metatable */ + lua_pushvalue(L, -1); + lua_setfield(L, LUA_REGISTRYINDEX, tname); /* registry.name = metatable */ + return 1; +} + + +LUALIB_API void *luaL_checkudata (lua_State *L, int ud, const char *tname) { + void *p = lua_touserdata(L, ud); + if (p != NULL) { /* value is a userdata? */ + if (lua_getmetatable(L, ud)) { /* does it have a metatable? */ + lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get correct metatable */ + if (lua_rawequal(L, -1, -2)) { /* does it have the correct mt? */ + lua_pop(L, 2); /* remove both metatables */ + return p; + } + } + } + luaL_typerror(L, ud, tname); /* else error */ + return NULL; /* to avoid warnings */ +} + + +LUALIB_API void luaL_checkstack (lua_State *L, int space, const char *mes) { + if (!lua_checkstack(L, space)) + luaL_error(L, "stack overflow (%s)", mes); +} + + +LUALIB_API void luaL_checktype (lua_State *L, int narg, int t) { + if (lua_type(L, narg) != t) + tag_error(L, narg, t); +} + + +LUALIB_API void luaL_checkany (lua_State *L, int narg) { + if (lua_type(L, narg) == LUA_TNONE) + luaL_argerror(L, narg, "value expected"); +} + + +LUALIB_API const char *luaL_checklstring (lua_State *L, int narg, size_t *len) { + const char *s = lua_tolstring(L, narg, len); + if (!s) tag_error(L, narg, LUA_TSTRING); + return s; +} + + +LUALIB_API const char *luaL_optlstring (lua_State *L, int narg, + const char *def, size_t *len) { + if (lua_isnoneornil(L, narg)) { + if (len) + *len = (def ? strlen(def) : 0); + return def; + } + else return luaL_checklstring(L, narg, len); +} + + +LUALIB_API lua_Number luaL_checknumber (lua_State *L, int narg) { + lua_Number d = lua_tonumber(L, narg); + if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */ + tag_error(L, narg, LUA_TNUMBER); + return d; +} + + +LUALIB_API lua_Number luaL_optnumber (lua_State *L, int narg, lua_Number def) { + return luaL_opt(L, luaL_checknumber, narg, def); +} + + +LUALIB_API lua_Integer luaL_checkinteger (lua_State *L, int narg) { + lua_Integer d = lua_tointeger(L, narg); + if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */ + tag_error(L, narg, LUA_TNUMBER); + return d; +} + + +LUALIB_API lua_Integer luaL_optinteger (lua_State *L, int narg, + lua_Integer def) { + return luaL_opt(L, luaL_checkinteger, narg, def); +} + + +LUALIB_API int luaL_getmetafield (lua_State *L, int obj, const char *event) { + if (!lua_getmetatable(L, obj)) /* no metatable? */ + return 0; + lua_pushstring(L, event); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { + lua_pop(L, 2); /* remove metatable and metafield */ + return 0; + } + else { + lua_remove(L, -2); /* remove only metatable */ + return 1; + } +} + + +LUALIB_API int luaL_callmeta (lua_State *L, int obj, const char *event) { + obj = abs_index(L, obj); + if (!luaL_getmetafield(L, obj, event)) /* no metafield? 
*/ + return 0; + lua_pushvalue(L, obj); + lua_call(L, 1, 1); + return 1; +} + + +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l) { + luaI_openlib(L, libname, l, 0); +} + + +static int libsize (const luaL_Reg *l) { + int size = 0; + for (; l->name; l++) size++; + return size; +} + + +LUALIB_API void luaI_openlib (lua_State *L, const char *libname, + const luaL_Reg *l, int nup) { + if (libname) { + int size = libsize(l); + /* check whether lib already exists */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 1); + lua_getfield(L, -1, libname); /* get _LOADED[libname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL) + luaL_error(L, "name conflict for module " LUA_QS, libname); + lua_pushvalue(L, -1); + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + lua_remove(L, -2); /* remove _LOADED table */ + lua_insert(L, -(nup+1)); /* move library table to below upvalues */ + } + for (; l->name; l++) { + int i; + for (i=0; i<nup; i++) /* copy upvalues to the top */ + lua_pushvalue(L, -nup); + lua_pushcclosure(L, l->func, nup); + lua_setfield(L, -(nup+2), l->name); + } + lua_pop(L, nup); /* remove upvalues */ +} + + + +/* +** {====================================================== +** getn-setn: size for arrays +** ======================================================= +*/ + +#if defined(LUA_COMPAT_GETN) + +static int checkint (lua_State *L, int topop) { + int n = (lua_type(L, -1) == LUA_TNUMBER) ? lua_tointeger(L, -1) : -1; + lua_pop(L, topop); + return n; +} + + +static void getsizes (lua_State *L) { + lua_getfield(L, LUA_REGISTRYINDEX, "LUA_SIZES"); + if (lua_isnil(L, -1)) { /* no `size' table? */ + lua_pop(L, 1); /* remove nil */ + lua_newtable(L); /* create it */ + lua_pushvalue(L, -1); /* `size' will be its own metatable */ + lua_setmetatable(L, -2); + lua_pushliteral(L, "kv"); + lua_setfield(L, -2, "__mode"); /* metatable(N).__mode = "kv" */ + lua_pushvalue(L, -1); + lua_setfield(L, LUA_REGISTRYINDEX, "LUA_SIZES"); /* store in register */ + } +} + + +LUALIB_API void luaL_setn (lua_State *L, int t, int n) { + t = abs_index(L, t); + lua_pushliteral(L, "n"); + lua_rawget(L, t); + if (checkint(L, 1) >= 0) { /* is there a numeric field `n'?
*/ + lua_pushliteral(L, "n"); /* use it */ + lua_pushinteger(L, n); + lua_rawset(L, t); + } + else { /* use `sizes' */ + getsizes(L); + lua_pushvalue(L, t); + lua_pushinteger(L, n); + lua_rawset(L, -3); /* sizes[t] = n */ + lua_pop(L, 1); /* remove `sizes' */ + } +} + + +LUALIB_API int luaL_getn (lua_State *L, int t) { + int n; + t = abs_index(L, t); + lua_pushliteral(L, "n"); /* try t.n */ + lua_rawget(L, t); + if ((n = checkint(L, 1)) >= 0) return n; + getsizes(L); /* else try sizes[t] */ + lua_pushvalue(L, t); + lua_rawget(L, -2); + if ((n = checkint(L, 2)) >= 0) return n; + return (int)lua_objlen(L, t); +} + +#endif + +/* }====================================================== */ + + + +LUALIB_API const char *luaL_gsub (lua_State *L, const char *s, const char *p, + const char *r) { + const char *wild; + size_t l = strlen(p); + luaL_Buffer b; + luaL_buffinit(L, &b); + while ((wild = strstr(s, p)) != NULL) { + luaL_addlstring(&b, s, wild - s); /* push prefix */ + luaL_addstring(&b, r); /* push replacement in place of pattern */ + s = wild + l; /* continue after `p' */ + } + luaL_addstring(&b, s); /* push last suffix */ + luaL_pushresult(&b); + return lua_tostring(L, -1); +} + + +LUALIB_API const char *luaL_findtable (lua_State *L, int idx, + const char *fname, int szhint) { + const char *e; + lua_pushvalue(L, idx); + do { + e = strchr(fname, '.'); + if (e == NULL) e = fname + strlen(fname); + lua_pushlstring(L, fname, e - fname); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { /* no such field? */ + lua_pop(L, 1); /* remove this nil */ + lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */ + lua_pushlstring(L, fname, e - fname); + lua_pushvalue(L, -2); + lua_settable(L, -4); /* set new table into field */ + } + else if (!lua_istable(L, -1)) { /* field has a non-table value? 
*/ + lua_pop(L, 2); /* remove table and value */ + return fname; /* return problematic part of the name */ + } + lua_remove(L, -2); /* remove previous table */ + fname = e + 1; + } while (*e == '.'); + return NULL; +} + + + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + +#define bufflen(B) ((B)->p - (B)->buffer) +#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B))) + +#define LIMIT (LUA_MINSTACK/2) + + +static int emptybuffer (luaL_Buffer *B) { + size_t l = bufflen(B); + if (l == 0) return 0; /* put nothing on stack */ + else { + lua_pushlstring(B->L, B->buffer, l); + B->p = B->buffer; + B->lvl++; + return 1; + } +} + + +static void adjuststack (luaL_Buffer *B) { + if (B->lvl > 1) { + lua_State *L = B->L; + int toget = 1; /* number of levels to concat */ + size_t toplen = lua_strlen(L, -1); + do { + size_t l = lua_strlen(L, -(toget+1)); + if (B->lvl - toget + 1 >= LIMIT || toplen > l) { + toplen += l; + toget++; + } + else break; + } while (toget < B->lvl); + lua_concat(L, toget); + B->lvl = B->lvl - toget + 1; + } +} + + +LUALIB_API char *luaL_prepbuffer (luaL_Buffer *B) { + if (emptybuffer(B)) + adjuststack(B); + return B->buffer; +} + + +LUALIB_API void luaL_addlstring (luaL_Buffer *B, const char *s, size_t l) { + while (l--) + luaL_addchar(B, *s++); +} + + +LUALIB_API void luaL_addstring (luaL_Buffer *B, const char *s) { + luaL_addlstring(B, s, strlen(s)); +} + + +LUALIB_API void luaL_pushresult (luaL_Buffer *B) { + emptybuffer(B); + lua_concat(B->L, B->lvl); + B->lvl = 1; +} + + +LUALIB_API void luaL_addvalue (luaL_Buffer *B) { + lua_State *L = B->L; + size_t vl; + const char *s = lua_tolstring(L, -1, &vl); + if (vl <= bufffree(B)) { /* fit into buffer? */ + memcpy(B->p, s, vl); /* put it there */ + B->p += vl; + lua_pop(L, 1); /* remove from stack */ + } + else { + if (emptybuffer(B)) + lua_insert(L, -2); /* put buffer before new value */ + B->lvl++; /* add new value into B stack */ + adjuststack(B); + } +} + + +LUALIB_API void luaL_buffinit (lua_State *L, luaL_Buffer *B) { + B->L = L; + B->p = B->buffer; + B->lvl = 0; +} + +/* }====================================================== */ + + +LUALIB_API int luaL_ref (lua_State *L, int t) { + int ref; + t = abs_index(L, t); + if (lua_isnil(L, -1)) { + lua_pop(L, 1); /* remove from stack */ + return LUA_REFNIL; /* `nil' has a unique fixed reference */ + } + lua_rawgeti(L, t, FREELIST_REF); /* get first free element */ + ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */ + lua_pop(L, 1); /* remove it from stack */ + if (ref != 0) { /* any free element? 
*/ + lua_rawgeti(L, t, ref); /* remove it from list */ + lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */ + } + else { /* no free elements */ + ref = (int)lua_objlen(L, t); + ref++; /* create new reference */ + } + lua_rawseti(L, t, ref); + return ref; +} + + +LUALIB_API void luaL_unref (lua_State *L, int t, int ref) { + if (ref >= 0) { + t = abs_index(L, t); + lua_rawgeti(L, t, FREELIST_REF); + lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */ + lua_pushinteger(L, ref); + lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */ + } +} + + + +/* +** {====================================================== +** Load functions +** ======================================================= +*/ + +typedef struct LoadF { + int extraline; + FILE *f; + char buff[LUAL_BUFFERSIZE]; +} LoadF; + + +static const char *getF (lua_State *L, void *ud, size_t *size) { + LoadF *lf = (LoadF *)ud; + (void)L; + if (lf->extraline) { + lf->extraline = 0; + *size = 1; + return "\n"; + } + if (feof(lf->f)) return NULL; + *size = fread(lf->buff, 1, sizeof(lf->buff), lf->f); + return (*size > 0) ? lf->buff : NULL; +} + + +static int errfile (lua_State *L, const char *what, int fnameindex) { + const char *serr = strerror(errno); + const char *filename = lua_tostring(L, fnameindex) + 1; + lua_pushfstring(L, "cannot %s %s: %s", what, filename, serr); + lua_remove(L, fnameindex); + return LUA_ERRFILE; +} + + +LUALIB_API int luaL_loadfile (lua_State *L, const char *filename) { + LoadF lf; + int status, readstatus; + int c; + int fnameindex = lua_gettop(L) + 1; /* index of filename on the stack */ + lf.extraline = 0; + if (filename == NULL) { + lua_pushliteral(L, "=stdin"); + lf.f = stdin; + } + else { + lua_pushfstring(L, "@%s", filename); + lf.f = fopen(filename, "r"); + if (lf.f == NULL) return errfile(L, "open", fnameindex); + } + c = getc(lf.f); + if (c == '#') { /* Unix exec. file? */ + lf.extraline = 1; + while ((c = getc(lf.f)) != EOF && c != '\n') ; /* skip first line */ + if (c == '\n') c = getc(lf.f); + } + if (c == LUA_SIGNATURE[0] && filename) { /* binary file? */ + lf.f = freopen(filename, "rb", lf.f); /* reopen in binary mode */ + if (lf.f == NULL) return errfile(L, "reopen", fnameindex); + /* skip eventual `#!...' 
*/ + while ((c = getc(lf.f)) != EOF && c != LUA_SIGNATURE[0]) ; + lf.extraline = 0; + } + ungetc(c, lf.f); + status = lua_load(L, getF, &lf, lua_tostring(L, -1)); + readstatus = ferror(lf.f); + if (filename) fclose(lf.f); /* close file (even in case of errors) */ + if (readstatus) { + lua_settop(L, fnameindex); /* ignore results from `lua_load' */ + return errfile(L, "read", fnameindex); + } + lua_remove(L, fnameindex); + return status; +} + + +typedef struct LoadS { + const char *s; + size_t size; +} LoadS; + + +static const char *getS (lua_State *L, void *ud, size_t *size) { + LoadS *ls = (LoadS *)ud; + (void)L; + if (ls->size == 0) return NULL; + *size = ls->size; + ls->size = 0; + return ls->s; +} + + +LUALIB_API int luaL_loadbuffer (lua_State *L, const char *buff, size_t size, + const char *name) { + LoadS ls; + ls.s = buff; + ls.size = size; + return lua_load(L, getS, &ls, name); +} + + +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s) { + return luaL_loadbuffer(L, s, strlen(s), s); +} + + + +/* }====================================================== */ + + +static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) { + (void)ud; + (void)osize; + if (nsize == 0) { + free(ptr); + return NULL; + } + else + return realloc(ptr, nsize); +} + + +static int panic (lua_State *L) { + (void)L; /* to avoid warnings */ + fprintf(stderr, "PANIC: unprotected error in call to Lua API (%s)\n", + lua_tostring(L, -1)); + return 0; +} + + +LUALIB_API lua_State *luaL_newstate (void) { + lua_State *L = lua_newstate(l_alloc, NULL); + if (L) lua_atpanic(L, &panic); + return L; +} + diff --git a/deps/lua/src/lauxlib.h b/deps/lua/src/lauxlib.h new file mode 100644 index 0000000..3425823 --- /dev/null +++ b/deps/lua/src/lauxlib.h @@ -0,0 +1,174 @@ +/* +** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lauxlib_h +#define lauxlib_h + + +#include <stddef.h> +#include <stdarg.h> + +#include "lua.h" + + +#if defined(LUA_COMPAT_GETN) +LUALIB_API int (luaL_getn) (lua_State *L, int t); +LUALIB_API void (luaL_setn) (lua_State *L, int t, int n); +#else +#define luaL_getn(L,i) ((int)lua_objlen(L, i)) +#define luaL_setn(L,i,j) ((void)0) /* no op!
*/ +#endif + +#if defined(LUA_COMPAT_OPENLIB) +#define luaI_openlib luaL_openlib +#endif + + +/* extra error code for `luaL_load' */ +#define LUA_ERRFILE (LUA_ERRERR+1) + + +typedef struct luaL_Reg { + const char *name; + lua_CFunction func; +} luaL_Reg; + + + +LUALIB_API void (luaI_openlib) (lua_State *L, const char *libname, + const luaL_Reg *l, int nup); +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l); +LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); +LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); +LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, + size_t *l); +LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, + const char *def, size_t *l); +LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); +LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); + +LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); +LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, + lua_Integer def); + +LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); +LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); +LUALIB_API void (luaL_checkany) (lua_State *L, int narg); + +LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); +LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); + +LUALIB_API void (luaL_where) (lua_State *L, int lvl); +LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); + +LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, + const char *const lst[]); + +LUALIB_API int (luaL_ref) (lua_State *L, int t); +LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); + +LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); +LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, + const char *name); +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); + +LUALIB_API lua_State *(luaL_newstate) (void); + + +LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, + const char *r); + +LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, + const char *fname, int szhint); + + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define luaL_argcheck(L, cond,numarg,extramsg) \ + ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) +#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) +#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) +#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) +#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) +#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) +#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) + +#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) + +#define luaL_dofile(L, fn) \ + (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_dostring(L, s) \ + (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) + +#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? 
(d) : f(L,(n))) + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + + +typedef struct luaL_Buffer { + char *p; /* current position in buffer */ + int lvl; /* number of strings in the stack (level) */ + lua_State *L; + char buffer[LUAL_BUFFERSIZE]; +} luaL_Buffer; + +#define luaL_addchar(B,c) \ + ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ + (*(B)->p++ = (char)(c))) + +/* compatibility only */ +#define luaL_putchar(B,c) luaL_addchar(B,c) + +#define luaL_addsize(B,n) ((B)->p += (n)) + +LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); +LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); +LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); +LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); +LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); +LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); + + +/* }====================================================== */ + + +/* compatibility with ref system */ + +/* pre-defined references */ +#define LUA_NOREF (-2) +#define LUA_REFNIL (-1) + +#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \ + (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0)) + +#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref)) + +#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref)) + + +#define luaL_reg luaL_Reg + +#endif + + diff --git a/deps/lua/src/lbaselib.c b/deps/lua/src/lbaselib.c new file mode 100644 index 0000000..2ab550b --- /dev/null +++ b/deps/lua/src/lbaselib.c @@ -0,0 +1,653 @@ +/* +** $Id: lbaselib.c,v 1.191.1.6 2008/02/14 16:46:22 roberto Exp $ +** Basic library +** See Copyright Notice in lua.h +*/ + + + +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define lbaselib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + + +/* +** If your system does not support `stdout', you can just remove this function. +** If you need, you can define your own `print' function, following this +** model but changing `fputs' to put the strings at a proper place +** (a console window or a log file, for instance). +*/ +static int luaB_print (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + int i; + lua_getglobal(L, "tostring"); + for (i=1; i<=n; i++) { + const char *s; + lua_pushvalue(L, -1); /* function to be called */ + lua_pushvalue(L, i); /* value to print */ + lua_call(L, 1, 1); + s = lua_tostring(L, -1); /* get result */ + if (s == NULL) + return luaL_error(L, LUA_QL("tostring") " must return a string to " + LUA_QL("print")); + if (i>1) fputs("\t", stdout); + fputs(s, stdout); + lua_pop(L, 1); /* pop result */ + } + fputs("\n", stdout); + return 0; +} + + +static int luaB_tonumber (lua_State *L) { + int base = luaL_optint(L, 2, 10); + if (base == 10) { /* standard conversion */ + luaL_checkany(L, 1); + if (lua_isnumber(L, 1)) { + lua_pushnumber(L, lua_tonumber(L, 1)); + return 1; + } + } + else { + const char *s1 = luaL_checkstring(L, 1); + char *s2; + unsigned long n; + luaL_argcheck(L, 2 <= base && base <= 36, 2, "base out of range"); + n = strtoul(s1, &s2, base); + if (s1 != s2) { /* at least one valid digit? */ + while (isspace((unsigned char)(*s2))) s2++; /* skip trailing spaces */ + if (*s2 == '\0') { /* no invalid trailing characters?
*/ + lua_pushnumber(L, (lua_Number)n); + return 1; + } + } + } + lua_pushnil(L); /* else not a number */ + return 1; +} + + +static int luaB_error (lua_State *L) { + int level = luaL_optint(L, 2, 1); + lua_settop(L, 1); + if (lua_isstring(L, 1) && level > 0) { /* add extra information? */ + luaL_where(L, level); + lua_pushvalue(L, 1); + lua_concat(L, 2); + } + return lua_error(L); +} + + +static int luaB_getmetatable (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + lua_pushnil(L); + return 1; /* no metatable */ + } + luaL_getmetafield(L, 1, "__metatable"); + return 1; /* returns either __metatable field (if present) or metatable */ +} + + +static int luaB_setmetatable (lua_State *L) { + int t = lua_type(L, 2); + luaL_checktype(L, 1, LUA_TTABLE); + luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2, + "nil or table expected"); + if (luaL_getmetafield(L, 1, "__metatable")) + luaL_error(L, "cannot change a protected metatable"); + lua_settop(L, 2); + lua_setmetatable(L, 1); + return 1; +} + + +static void getfunc (lua_State *L, int opt) { + if (lua_isfunction(L, 1)) lua_pushvalue(L, 1); + else { + lua_Debug ar; + int level = opt ? luaL_optint(L, 1, 1) : luaL_checkint(L, 1); + luaL_argcheck(L, level >= 0, 1, "level must be non-negative"); + if (lua_getstack(L, level, &ar) == 0) + luaL_argerror(L, 1, "invalid level"); + lua_getinfo(L, "f", &ar); + if (lua_isnil(L, -1)) + luaL_error(L, "no function environment for tail call at level %d", + level); + } +} + + +static int luaB_getfenv (lua_State *L) { + getfunc(L, 1); + if (lua_iscfunction(L, -1)) /* is a C function? */ + lua_pushvalue(L, LUA_GLOBALSINDEX); /* return the thread's global env. */ + else + lua_getfenv(L, -1); + return 1; +} + + +static int luaB_setfenv (lua_State *L) { + luaL_checktype(L, 2, LUA_TTABLE); + getfunc(L, 0); + lua_pushvalue(L, 2); + if (lua_isnumber(L, 1) && lua_tonumber(L, 1) == 0) { + /* change environment of current thread */ + lua_pushthread(L); + lua_insert(L, -2); + lua_setfenv(L, -2); + return 0; + } + else if (lua_iscfunction(L, -2) || lua_setfenv(L, -2) == 0) + luaL_error(L, + LUA_QL("setfenv") " cannot change environment of given object"); + return 1; +} + + +static int luaB_rawequal (lua_State *L) { + luaL_checkany(L, 1); + luaL_checkany(L, 2); + lua_pushboolean(L, lua_rawequal(L, 1, 2)); + return 1; +} + + +static int luaB_rawget (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checkany(L, 2); + lua_settop(L, 2); + lua_rawget(L, 1); + return 1; +} + +static int luaB_rawset (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checkany(L, 2); + luaL_checkany(L, 3); + lua_settop(L, 3); + lua_rawset(L, 1); + return 1; +} + + +static int luaB_gcinfo (lua_State *L) { + lua_pushinteger(L, lua_getgccount(L)); + return 1; +} + + +static int luaB_collectgarbage (lua_State *L) { + static const char *const opts[] = {"stop", "restart", "collect", + "count", "step", "setpause", "setstepmul", NULL}; + static const int optsnum[] = {LUA_GCSTOP, LUA_GCRESTART, LUA_GCCOLLECT, + LUA_GCCOUNT, LUA_GCSTEP, LUA_GCSETPAUSE, LUA_GCSETSTEPMUL}; + int o = luaL_checkoption(L, 1, "collect", opts); + int ex = luaL_optint(L, 2, 0); + int res = lua_gc(L, optsnum[o], ex); + switch (optsnum[o]) { + case LUA_GCCOUNT: { + int b = lua_gc(L, LUA_GCCOUNTB, 0); + lua_pushnumber(L, res + ((lua_Number)b/1024)); + return 1; + } + case LUA_GCSTEP: { + lua_pushboolean(L, res); + return 1; + } + default: { + lua_pushnumber(L, res); + return 1; + } + } +} + + +static int luaB_type (lua_State *L) { + 
luaL_checkany(L, 1); + lua_pushstring(L, luaL_typename(L, 1)); + return 1; +} + + +static int luaB_next (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 2); /* create a 2nd argument if there isn't one */ + if (lua_next(L, 1)) + return 2; + else { + lua_pushnil(L); + return 1; + } +} + + +static int luaB_pairs (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_pushvalue(L, lua_upvalueindex(1)); /* return generator, */ + lua_pushvalue(L, 1); /* state, */ + lua_pushnil(L); /* and initial value */ + return 3; +} + + +static int ipairsaux (lua_State *L) { + int i = luaL_checkint(L, 2); + luaL_checktype(L, 1, LUA_TTABLE); + i++; /* next value */ + lua_pushinteger(L, i); + lua_rawgeti(L, 1, i); + return (lua_isnil(L, -1)) ? 0 : 2; +} + + +static int luaB_ipairs (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_pushvalue(L, lua_upvalueindex(1)); /* return generator, */ + lua_pushvalue(L, 1); /* state, */ + lua_pushinteger(L, 0); /* and initial value */ + return 3; +} + + +static int load_aux (lua_State *L, int status) { + if (status == 0) /* OK? */ + return 1; + else { + lua_pushnil(L); + lua_insert(L, -2); /* put before error message */ + return 2; /* return nil plus error message */ + } +} + + +static int luaB_loadstring (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + const char *chunkname = luaL_optstring(L, 2, s); + return load_aux(L, luaL_loadbuffer(L, s, l, chunkname)); +} + + +static int luaB_loadfile (lua_State *L) { + const char *fname = luaL_optstring(L, 1, NULL); + return load_aux(L, luaL_loadfile(L, fname)); +} + + +/* +** Reader for generic `load' function: `lua_load' uses the +** stack for internal stuff, so the reader cannot change the +** stack top. Instead, it keeps its resulting string in a +** reserved slot inside the stack. +*/ +static const char *generic_reader (lua_State *L, void *ud, size_t *size) { + (void)ud; /* to avoid warnings */ + luaL_checkstack(L, 2, "too many nested functions"); + lua_pushvalue(L, 1); /* get function */ + lua_call(L, 0, 1); /* call it */ + if (lua_isnil(L, -1)) { + *size = 0; + return NULL; + } + else if (lua_isstring(L, -1)) { + lua_replace(L, 3); /* save string in a reserved stack slot */ + return lua_tolstring(L, 3, size); + } + else luaL_error(L, "reader function must return a string"); + return NULL; /* to avoid warnings */ +} + + +static int luaB_load (lua_State *L) { + int status; + const char *cname = luaL_optstring(L, 2, "=(load)"); + luaL_checktype(L, 1, LUA_TFUNCTION); + lua_settop(L, 3); /* function, eventual name, plus one reserved slot */ + status = lua_load(L, generic_reader, NULL, cname); + return load_aux(L, status); +} + + +static int luaB_dofile (lua_State *L) { + const char *fname = luaL_optstring(L, 1, NULL); + int n = lua_gettop(L); + if (luaL_loadfile(L, fname) != 0) lua_error(L); + lua_call(L, 0, LUA_MULTRET); + return lua_gettop(L) - n; +} + + +static int luaB_assert (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_toboolean(L, 1)) + return luaL_error(L, "%s", luaL_optstring(L, 2, "assertion failed!")); + return lua_gettop(L); +} + + +static int luaB_unpack (lua_State *L) { + int i, e, n; + luaL_checktype(L, 1, LUA_TTABLE); + i = luaL_optint(L, 2, 1); + e = luaL_opt(L, luaL_checkint, 3, luaL_getn(L, 1)); + if (i > e) return 0; /* empty range */ + n = e - i + 1; /* number of elements */ + if (n <= 0 || !lua_checkstack(L, n)) /* n <= 0 means arith. 
overflow */ + return luaL_error(L, "too many results to unpack"); + lua_rawgeti(L, 1, i); /* push arg[i] (avoiding overflow problems) */ + while (i++ < e) /* push arg[i + 1...e] */ + lua_rawgeti(L, 1, i); + return n; +} + + +static int luaB_select (lua_State *L) { + int n = lua_gettop(L); + if (lua_type(L, 1) == LUA_TSTRING && *lua_tostring(L, 1) == '#') { + lua_pushinteger(L, n-1); + return 1; + } + else { + int i = luaL_checkint(L, 1); + if (i < 0) i = n + i; + else if (i > n) i = n; + luaL_argcheck(L, 1 <= i, 1, "index out of range"); + return n - i; + } +} + + +static int luaB_pcall (lua_State *L) { + int status; + luaL_checkany(L, 1); + status = lua_pcall(L, lua_gettop(L) - 1, LUA_MULTRET, 0); + lua_pushboolean(L, (status == 0)); + lua_insert(L, 1); + return lua_gettop(L); /* return status + all results */ +} + + +static int luaB_xpcall (lua_State *L) { + int status; + luaL_checkany(L, 2); + lua_settop(L, 2); + lua_insert(L, 1); /* put error function under function to be called */ + status = lua_pcall(L, 0, LUA_MULTRET, 1); + lua_pushboolean(L, (status == 0)); + lua_replace(L, 1); + return lua_gettop(L); /* return status + all results */ +} + + +static int luaB_tostring (lua_State *L) { + luaL_checkany(L, 1); + if (luaL_callmeta(L, 1, "__tostring")) /* is there a metafield? */ + return 1; /* use its value */ + switch (lua_type(L, 1)) { + case LUA_TNUMBER: + lua_pushstring(L, lua_tostring(L, 1)); + break; + case LUA_TSTRING: + lua_pushvalue(L, 1); + break; + case LUA_TBOOLEAN: + lua_pushstring(L, (lua_toboolean(L, 1) ? "true" : "false")); + break; + case LUA_TNIL: + lua_pushliteral(L, "nil"); + break; + default: + lua_pushfstring(L, "%s: %p", luaL_typename(L, 1), lua_topointer(L, 1)); + break; + } + return 1; +} + + +static int luaB_newproxy (lua_State *L) { + lua_settop(L, 1); + lua_newuserdata(L, 0); /* create proxy */ + if (lua_toboolean(L, 1) == 0) + return 1; /* no metatable */ + else if (lua_isboolean(L, 1)) { + lua_newtable(L); /* create a new metatable `m' ... */ + lua_pushvalue(L, -1); /* ... 
and mark `m' as a valid metatable */ + lua_pushboolean(L, 1); + lua_rawset(L, lua_upvalueindex(1)); /* weaktable[m] = true */ + } + else { + int validproxy = 0; /* to check if weaktable[metatable(u)] == true */ + if (lua_getmetatable(L, 1)) { + lua_rawget(L, lua_upvalueindex(1)); + validproxy = lua_toboolean(L, -1); + lua_pop(L, 1); /* remove value */ + } + luaL_argcheck(L, validproxy, 1, "boolean or proxy expected"); + lua_getmetatable(L, 1); /* metatable is valid; get it */ + } + lua_setmetatable(L, 2); + return 1; +} + + +static const luaL_Reg base_funcs[] = { + {"assert", luaB_assert}, + {"collectgarbage", luaB_collectgarbage}, + {"dofile", luaB_dofile}, + {"error", luaB_error}, + {"gcinfo", luaB_gcinfo}, + {"getfenv", luaB_getfenv}, + {"getmetatable", luaB_getmetatable}, + {"loadfile", luaB_loadfile}, + {"load", luaB_load}, + {"loadstring", luaB_loadstring}, + {"next", luaB_next}, + {"pcall", luaB_pcall}, + {"print", luaB_print}, + {"rawequal", luaB_rawequal}, + {"rawget", luaB_rawget}, + {"rawset", luaB_rawset}, + {"select", luaB_select}, + {"setfenv", luaB_setfenv}, + {"setmetatable", luaB_setmetatable}, + {"tonumber", luaB_tonumber}, + {"tostring", luaB_tostring}, + {"type", luaB_type}, + {"unpack", luaB_unpack}, + {"xpcall", luaB_xpcall}, + {NULL, NULL} +}; + + +/* +** {====================================================== +** Coroutine library +** ======================================================= +*/ + +#define CO_RUN 0 /* running */ +#define CO_SUS 1 /* suspended */ +#define CO_NOR 2 /* 'normal' (it resumed another coroutine) */ +#define CO_DEAD 3 + +static const char *const statnames[] = + {"running", "suspended", "normal", "dead"}; + +static int costatus (lua_State *L, lua_State *co) { + if (L == co) return CO_RUN; + switch (lua_status(co)) { + case LUA_YIELD: + return CO_SUS; + case 0: { + lua_Debug ar; + if (lua_getstack(co, 0, &ar) > 0) /* does it have frames? 
*/ + return CO_NOR; /* it is running */ + else if (lua_gettop(co) == 0) + return CO_DEAD; + else + return CO_SUS; /* initial state */ + } + default: /* some error occured */ + return CO_DEAD; + } +} + + +static int luaB_costatus (lua_State *L) { + lua_State *co = lua_tothread(L, 1); + luaL_argcheck(L, co, 1, "coroutine expected"); + lua_pushstring(L, statnames[costatus(L, co)]); + return 1; +} + + +static int auxresume (lua_State *L, lua_State *co, int narg) { + int status = costatus(L, co); + if (!lua_checkstack(co, narg)) + luaL_error(L, "too many arguments to resume"); + if (status != CO_SUS) { + lua_pushfstring(L, "cannot resume %s coroutine", statnames[status]); + return -1; /* error flag */ + } + lua_xmove(L, co, narg); + lua_setlevel(L, co); + status = lua_resume(co, narg); + if (status == 0 || status == LUA_YIELD) { + int nres = lua_gettop(co); + if (!lua_checkstack(L, nres + 1)) + luaL_error(L, "too many results to resume"); + lua_xmove(co, L, nres); /* move yielded values */ + return nres; + } + else { + lua_xmove(co, L, 1); /* move error message */ + return -1; /* error flag */ + } +} + + +static int luaB_coresume (lua_State *L) { + lua_State *co = lua_tothread(L, 1); + int r; + luaL_argcheck(L, co, 1, "coroutine expected"); + r = auxresume(L, co, lua_gettop(L) - 1); + if (r < 0) { + lua_pushboolean(L, 0); + lua_insert(L, -2); + return 2; /* return false + error message */ + } + else { + lua_pushboolean(L, 1); + lua_insert(L, -(r + 1)); + return r + 1; /* return true + `resume' returns */ + } +} + + +static int luaB_auxwrap (lua_State *L) { + lua_State *co = lua_tothread(L, lua_upvalueindex(1)); + int r = auxresume(L, co, lua_gettop(L)); + if (r < 0) { + if (lua_isstring(L, -1)) { /* error object is a string? */ + luaL_where(L, 1); /* add extra info */ + lua_insert(L, -2); + lua_concat(L, 2); + } + lua_error(L); /* propagate error */ + } + return r; +} + + +static int luaB_cocreate (lua_State *L) { + lua_State *NL = lua_newthread(L); + luaL_argcheck(L, lua_isfunction(L, 1) && !lua_iscfunction(L, 1), 1, + "Lua function expected"); + lua_pushvalue(L, 1); /* move function to top */ + lua_xmove(L, NL, 1); /* move function from L to NL */ + return 1; +} + + +static int luaB_cowrap (lua_State *L) { + luaB_cocreate(L); + lua_pushcclosure(L, luaB_auxwrap, 1); + return 1; +} + + +static int luaB_yield (lua_State *L) { + return lua_yield(L, lua_gettop(L)); +} + + +static int luaB_corunning (lua_State *L) { + if (lua_pushthread(L)) + lua_pushnil(L); /* main thread is not a coroutine */ + return 1; +} + + +static const luaL_Reg co_funcs[] = { + {"create", luaB_cocreate}, + {"resume", luaB_coresume}, + {"running", luaB_corunning}, + {"status", luaB_costatus}, + {"wrap", luaB_cowrap}, + {"yield", luaB_yield}, + {NULL, NULL} +}; + +/* }====================================================== */ + + +static void auxopen (lua_State *L, const char *name, + lua_CFunction f, lua_CFunction u) { + lua_pushcfunction(L, u); + lua_pushcclosure(L, f, 1); + lua_setfield(L, -2, name); +} + + +static void base_open (lua_State *L) { + /* set global _G */ + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setglobal(L, "_G"); + /* open lib into global table */ + luaL_register(L, "_G", base_funcs); + lua_pushliteral(L, LUA_VERSION); + lua_setglobal(L, "_VERSION"); /* set global _VERSION */ + /* `ipairs' and `pairs' need auxiliary functions as upvalues */ + auxopen(L, "ipairs", luaB_ipairs, ipairsaux); + auxopen(L, "pairs", luaB_pairs, luaB_next); + /* `newproxy' needs a weaktable as upvalue */ + lua_createtable(L, 0, 
1); /* new table `w' */ + lua_pushvalue(L, -1); /* `w' will be its own metatable */ + lua_setmetatable(L, -2); + lua_pushliteral(L, "kv"); + lua_setfield(L, -2, "__mode"); /* metatable(w).__mode = "kv" */ + lua_pushcclosure(L, luaB_newproxy, 1); + lua_setglobal(L, "newproxy"); /* set global `newproxy' */ +} + + +LUALIB_API int luaopen_base (lua_State *L) { + base_open(L); + luaL_register(L, LUA_COLIBNAME, co_funcs); + return 2; +} + diff --git a/deps/lua/src/lcode.c b/deps/lua/src/lcode.c new file mode 100644 index 0000000..679cb9c --- /dev/null +++ b/deps/lua/src/lcode.c @@ -0,0 +1,831 @@ +/* +** $Id: lcode.c,v 2.25.1.5 2011/01/31 14:53:16 roberto Exp $ +** Code generator for Lua +** See Copyright Notice in lua.h +*/ + + +#include <stdlib.h> + +#define lcode_c +#define LUA_CORE + +#include "lua.h" + +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lgc.h" +#include "llex.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "ltable.h" + + +#define hasjumps(e) ((e)->t != (e)->f) + + +static int isnumeral(expdesc *e) { + return (e->k == VKNUM && e->t == NO_JUMP && e->f == NO_JUMP); +} + + +void luaK_nil (FuncState *fs, int from, int n) { + Instruction *previous; + if (fs->pc > fs->lasttarget) { /* no jumps to current position? */ + if (fs->pc == 0) { /* function start? */ + if (from >= fs->nactvar) + return; /* positions are already clean */ + } + else { + previous = &fs->f->code[fs->pc-1]; + if (GET_OPCODE(*previous) == OP_LOADNIL) { + int pfrom = GETARG_A(*previous); + int pto = GETARG_B(*previous); + if (pfrom <= from && from <= pto+1) { /* can connect both? */ + if (from+n-1 > pto) + SETARG_B(*previous, from+n-1); + return; + } + } + } + } + luaK_codeABC(fs, OP_LOADNIL, from, from+n-1, 0); /* else no optimization */ +} + + +int luaK_jump (FuncState *fs) { + int jpc = fs->jpc; /* save list of jumps to here */ + int j; + fs->jpc = NO_JUMP; + j = luaK_codeAsBx(fs, OP_JMP, 0, NO_JUMP); + luaK_concat(fs, &j, jpc); /* keep them on hold */ + return j; +} + + +void luaK_ret (FuncState *fs, int first, int nret) { + luaK_codeABC(fs, OP_RETURN, first, nret+1, 0); +} + + +static int condjump (FuncState *fs, OpCode op, int A, int B, int C) { + luaK_codeABC(fs, op, A, B, C); + return luaK_jump(fs); +} + + +static void fixjump (FuncState *fs, int pc, int dest) { + Instruction *jmp = &fs->f->code[pc]; + int offset = dest-(pc+1); + lua_assert(dest != NO_JUMP); + if (abs(offset) > MAXARG_sBx) + luaX_syntaxerror(fs->ls, "control structure too long"); + SETARG_sBx(*jmp, offset); +} + + +/* +** returns current `pc' and marks it as a jump target (to avoid wrong +** optimizations with consecutive instructions not in the same basic block).
+*/ +int luaK_getlabel (FuncState *fs) { + fs->lasttarget = fs->pc; + return fs->pc; +} + + +static int getjump (FuncState *fs, int pc) { + int offset = GETARG_sBx(fs->f->code[pc]); + if (offset == NO_JUMP) /* point to itself represents end of list */ + return NO_JUMP; /* end of list */ + else + return (pc+1)+offset; /* turn offset into absolute position */ +} + + +static Instruction *getjumpcontrol (FuncState *fs, int pc) { + Instruction *pi = &fs->f->code[pc]; + if (pc >= 1 && testTMode(GET_OPCODE(*(pi-1)))) + return pi-1; + else + return pi; +} + + +/* +** check whether list has any jump that do not produce a value +** (or produce an inverted value) +*/ +static int need_value (FuncState *fs, int list) { + for (; list != NO_JUMP; list = getjump(fs, list)) { + Instruction i = *getjumpcontrol(fs, list); + if (GET_OPCODE(i) != OP_TESTSET) return 1; + } + return 0; /* not found */ +} + + +static int patchtestreg (FuncState *fs, int node, int reg) { + Instruction *i = getjumpcontrol(fs, node); + if (GET_OPCODE(*i) != OP_TESTSET) + return 0; /* cannot patch other instructions */ + if (reg != NO_REG && reg != GETARG_B(*i)) + SETARG_A(*i, reg); + else /* no register to put value or register already has the value */ + *i = CREATE_ABC(OP_TEST, GETARG_B(*i), 0, GETARG_C(*i)); + + return 1; +} + + +static void removevalues (FuncState *fs, int list) { + for (; list != NO_JUMP; list = getjump(fs, list)) + patchtestreg(fs, list, NO_REG); +} + + +static void patchlistaux (FuncState *fs, int list, int vtarget, int reg, + int dtarget) { + while (list != NO_JUMP) { + int next = getjump(fs, list); + if (patchtestreg(fs, list, reg)) + fixjump(fs, list, vtarget); + else + fixjump(fs, list, dtarget); /* jump to default target */ + list = next; + } +} + + +static void dischargejpc (FuncState *fs) { + patchlistaux(fs, fs->jpc, fs->pc, NO_REG, fs->pc); + fs->jpc = NO_JUMP; +} + + +void luaK_patchlist (FuncState *fs, int list, int target) { + if (target == fs->pc) + luaK_patchtohere(fs, list); + else { + lua_assert(target < fs->pc); + patchlistaux(fs, list, target, NO_REG, target); + } +} + + +void luaK_patchtohere (FuncState *fs, int list) { + luaK_getlabel(fs); + luaK_concat(fs, &fs->jpc, list); +} + + +void luaK_concat (FuncState *fs, int *l1, int l2) { + if (l2 == NO_JUMP) return; + else if (*l1 == NO_JUMP) + *l1 = l2; + else { + int list = *l1; + int next; + while ((next = getjump(fs, list)) != NO_JUMP) /* find last element */ + list = next; + fixjump(fs, list, l2); + } +} + + +void luaK_checkstack (FuncState *fs, int n) { + int newstack = fs->freereg + n; + if (newstack > fs->f->maxstacksize) { + if (newstack >= MAXSTACK) + luaX_syntaxerror(fs->ls, "function or expression too complex"); + fs->f->maxstacksize = cast_byte(newstack); + } +} + + +void luaK_reserveregs (FuncState *fs, int n) { + luaK_checkstack(fs, n); + fs->freereg += n; +} + + +static void freereg (FuncState *fs, int reg) { + if (!ISK(reg) && reg >= fs->nactvar) { + fs->freereg--; + lua_assert(reg == fs->freereg); + } +} + + +static void freeexp (FuncState *fs, expdesc *e) { + if (e->k == VNONRELOC) + freereg(fs, e->u.s.info); +} + + +static int addk (FuncState *fs, TValue *k, TValue *v) { + lua_State *L = fs->L; + TValue *idx = luaH_set(L, fs->h, k); + Proto *f = fs->f; + int oldsize = f->sizek; + if (ttisnumber(idx)) { + lua_assert(luaO_rawequalObj(&fs->f->k[cast_int(nvalue(idx))], v)); + return cast_int(nvalue(idx)); + } + else { /* constant not found; create a new entry */ + setnvalue(idx, cast_num(fs->nk)); + luaM_growvector(L, f->k, 
fs->nk, f->sizek, TValue, + MAXARG_Bx, "constant table overflow"); + while (oldsize < f->sizek) setnilvalue(&f->k[oldsize++]); + setobj(L, &f->k[fs->nk], v); + luaC_barrier(L, f, v); + return fs->nk++; + } +} + + +int luaK_stringK (FuncState *fs, TString *s) { + TValue o; + setsvalue(fs->L, &o, s); + return addk(fs, &o, &o); +} + + +int luaK_numberK (FuncState *fs, lua_Number r) { + TValue o; + setnvalue(&o, r); + return addk(fs, &o, &o); +} + + +static int boolK (FuncState *fs, int b) { + TValue o; + setbvalue(&o, b); + return addk(fs, &o, &o); +} + + +static int nilK (FuncState *fs) { + TValue k, v; + setnilvalue(&v); + /* cannot use nil as key; instead use table itself to represent nil */ + sethvalue(fs->L, &k, fs->h); + return addk(fs, &k, &v); +} + + +void luaK_setreturns (FuncState *fs, expdesc *e, int nresults) { + if (e->k == VCALL) { /* expression is an open function call? */ + SETARG_C(getcode(fs, e), nresults+1); + } + else if (e->k == VVARARG) { + SETARG_B(getcode(fs, e), nresults+1); + SETARG_A(getcode(fs, e), fs->freereg); + luaK_reserveregs(fs, 1); + } +} + + +void luaK_setoneret (FuncState *fs, expdesc *e) { + if (e->k == VCALL) { /* expression is an open function call? */ + e->k = VNONRELOC; + e->u.s.info = GETARG_A(getcode(fs, e)); + } + else if (e->k == VVARARG) { + SETARG_B(getcode(fs, e), 2); + e->k = VRELOCABLE; /* can relocate its simple result */ + } +} + + +void luaK_dischargevars (FuncState *fs, expdesc *e) { + switch (e->k) { + case VLOCAL: { + e->k = VNONRELOC; + break; + } + case VUPVAL: { + e->u.s.info = luaK_codeABC(fs, OP_GETUPVAL, 0, e->u.s.info, 0); + e->k = VRELOCABLE; + break; + } + case VGLOBAL: { + e->u.s.info = luaK_codeABx(fs, OP_GETGLOBAL, 0, e->u.s.info); + e->k = VRELOCABLE; + break; + } + case VINDEXED: { + freereg(fs, e->u.s.aux); + freereg(fs, e->u.s.info); + e->u.s.info = luaK_codeABC(fs, OP_GETTABLE, 0, e->u.s.info, e->u.s.aux); + e->k = VRELOCABLE; + break; + } + case VVARARG: + case VCALL: { + luaK_setoneret(fs, e); + break; + } + default: break; /* there is one value available (somewhere) */ + } +} + + +static int code_label (FuncState *fs, int A, int b, int jump) { + luaK_getlabel(fs); /* those instructions may be jump targets */ + return luaK_codeABC(fs, OP_LOADBOOL, A, b, jump); +} + + +static void discharge2reg (FuncState *fs, expdesc *e, int reg) { + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: { + luaK_nil(fs, reg, 1); + break; + } + case VFALSE: case VTRUE: { + luaK_codeABC(fs, OP_LOADBOOL, reg, e->k == VTRUE, 0); + break; + } + case VK: { + luaK_codeABx(fs, OP_LOADK, reg, e->u.s.info); + break; + } + case VKNUM: { + luaK_codeABx(fs, OP_LOADK, reg, luaK_numberK(fs, e->u.nval)); + break; + } + case VRELOCABLE: { + Instruction *pc = &getcode(fs, e); + SETARG_A(*pc, reg); + break; + } + case VNONRELOC: { + if (reg != e->u.s.info) + luaK_codeABC(fs, OP_MOVE, reg, e->u.s.info, 0); + break; + } + default: { + lua_assert(e->k == VVOID || e->k == VJMP); + return; /* nothing to do... 
*/ + } + } + e->u.s.info = reg; + e->k = VNONRELOC; +} + + +static void discharge2anyreg (FuncState *fs, expdesc *e) { + if (e->k != VNONRELOC) { + luaK_reserveregs(fs, 1); + discharge2reg(fs, e, fs->freereg-1); + } +} + + +static void exp2reg (FuncState *fs, expdesc *e, int reg) { + discharge2reg(fs, e, reg); + if (e->k == VJMP) + luaK_concat(fs, &e->t, e->u.s.info); /* put this jump in `t' list */ + if (hasjumps(e)) { + int final; /* position after whole expression */ + int p_f = NO_JUMP; /* position of an eventual LOAD false */ + int p_t = NO_JUMP; /* position of an eventual LOAD true */ + if (need_value(fs, e->t) || need_value(fs, e->f)) { + int fj = (e->k == VJMP) ? NO_JUMP : luaK_jump(fs); + p_f = code_label(fs, reg, 0, 1); + p_t = code_label(fs, reg, 1, 0); + luaK_patchtohere(fs, fj); + } + final = luaK_getlabel(fs); + patchlistaux(fs, e->f, final, reg, p_f); + patchlistaux(fs, e->t, final, reg, p_t); + } + e->f = e->t = NO_JUMP; + e->u.s.info = reg; + e->k = VNONRELOC; +} + + +void luaK_exp2nextreg (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + freeexp(fs, e); + luaK_reserveregs(fs, 1); + exp2reg(fs, e, fs->freereg - 1); +} + + +int luaK_exp2anyreg (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + if (e->k == VNONRELOC) { + if (!hasjumps(e)) return e->u.s.info; /* exp is already in a register */ + if (e->u.s.info >= fs->nactvar) { /* reg. is not a local? */ + exp2reg(fs, e, e->u.s.info); /* put value on it */ + return e->u.s.info; + } + } + luaK_exp2nextreg(fs, e); /* default */ + return e->u.s.info; +} + + +void luaK_exp2val (FuncState *fs, expdesc *e) { + if (hasjumps(e)) + luaK_exp2anyreg(fs, e); + else + luaK_dischargevars(fs, e); +} + + +int luaK_exp2RK (FuncState *fs, expdesc *e) { + luaK_exp2val(fs, e); + switch (e->k) { + case VKNUM: + case VTRUE: + case VFALSE: + case VNIL: { + if (fs->nk <= MAXINDEXRK) { /* constant fit in RK operand? */ + e->u.s.info = (e->k == VNIL) ? nilK(fs) : + (e->k == VKNUM) ? luaK_numberK(fs, e->u.nval) : + boolK(fs, (e->k == VTRUE)); + e->k = VK; + return RKASK(e->u.s.info); + } + else break; + } + case VK: { + if (e->u.s.info <= MAXINDEXRK) /* constant fit in argC? 
*/ + return RKASK(e->u.s.info); + else break; + } + default: break; + } + /* not a constant in the right range: put it in a register */ + return luaK_exp2anyreg(fs, e); +} + + +void luaK_storevar (FuncState *fs, expdesc *var, expdesc *ex) { + switch (var->k) { + case VLOCAL: { + freeexp(fs, ex); + exp2reg(fs, ex, var->u.s.info); + return; + } + case VUPVAL: { + int e = luaK_exp2anyreg(fs, ex); + luaK_codeABC(fs, OP_SETUPVAL, e, var->u.s.info, 0); + break; + } + case VGLOBAL: { + int e = luaK_exp2anyreg(fs, ex); + luaK_codeABx(fs, OP_SETGLOBAL, e, var->u.s.info); + break; + } + case VINDEXED: { + int e = luaK_exp2RK(fs, ex); + luaK_codeABC(fs, OP_SETTABLE, var->u.s.info, var->u.s.aux, e); + break; + } + default: { + lua_assert(0); /* invalid var kind to store */ + break; + } + } + freeexp(fs, ex); +} + + +void luaK_self (FuncState *fs, expdesc *e, expdesc *key) { + int func; + luaK_exp2anyreg(fs, e); + freeexp(fs, e); + func = fs->freereg; + luaK_reserveregs(fs, 2); + luaK_codeABC(fs, OP_SELF, func, e->u.s.info, luaK_exp2RK(fs, key)); + freeexp(fs, key); + e->u.s.info = func; + e->k = VNONRELOC; +} + + +static void invertjump (FuncState *fs, expdesc *e) { + Instruction *pc = getjumpcontrol(fs, e->u.s.info); + lua_assert(testTMode(GET_OPCODE(*pc)) && GET_OPCODE(*pc) != OP_TESTSET && + GET_OPCODE(*pc) != OP_TEST); + SETARG_A(*pc, !(GETARG_A(*pc))); +} + + +static int jumponcond (FuncState *fs, expdesc *e, int cond) { + if (e->k == VRELOCABLE) { + Instruction ie = getcode(fs, e); + if (GET_OPCODE(ie) == OP_NOT) { + fs->pc--; /* remove previous OP_NOT */ + return condjump(fs, OP_TEST, GETARG_B(ie), 0, !cond); + } + /* else go through */ + } + discharge2anyreg(fs, e); + freeexp(fs, e); + return condjump(fs, OP_TESTSET, NO_REG, e->u.s.info, cond); +} + + +void luaK_goiftrue (FuncState *fs, expdesc *e) { + int pc; /* pc of last jump */ + luaK_dischargevars(fs, e); + switch (e->k) { + case VK: case VKNUM: case VTRUE: { + pc = NO_JUMP; /* always true; do nothing */ + break; + } + case VJMP: { + invertjump(fs, e); + pc = e->u.s.info; + break; + } + default: { + pc = jumponcond(fs, e, 0); + break; + } + } + luaK_concat(fs, &e->f, pc); /* insert last jump in `f' list */ + luaK_patchtohere(fs, e->t); + e->t = NO_JUMP; +} + + +static void luaK_goiffalse (FuncState *fs, expdesc *e) { + int pc; /* pc of last jump */ + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: case VFALSE: { + pc = NO_JUMP; /* always false; do nothing */ + break; + } + case VJMP: { + pc = e->u.s.info; + break; + } + default: { + pc = jumponcond(fs, e, 1); + break; + } + } + luaK_concat(fs, &e->t, pc); /* insert last jump in `t' list */ + luaK_patchtohere(fs, e->f); + e->f = NO_JUMP; +} + + +static void codenot (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: case VFALSE: { + e->k = VTRUE; + break; + } + case VK: case VKNUM: case VTRUE: { + e->k = VFALSE; + break; + } + case VJMP: { + invertjump(fs, e); + break; + } + case VRELOCABLE: + case VNONRELOC: { + discharge2anyreg(fs, e); + freeexp(fs, e); + e->u.s.info = luaK_codeABC(fs, OP_NOT, 0, e->u.s.info, 0); + e->k = VRELOCABLE; + break; + } + default: { + lua_assert(0); /* cannot happen */ + break; + } + } + /* interchange true and false lists */ + { int temp = e->f; e->f = e->t; e->t = temp; } + removevalues(fs, e->f); + removevalues(fs, e->t); +} + + +void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k) { + t->u.s.aux = luaK_exp2RK(fs, k); + t->k = VINDEXED; +} + + +static int constfolding (OpCode op, expdesc *e1, expdesc *e2) 
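/*
** A minimal standalone sketch (not part of the upstream source above) of the
** RK encoding that luaK_exp2RK() produces: a single operand names either a
** register or a constant-table index, distinguished by a marker bit (the role
** of ISK/INDEXK/RKASK in lopcodes.h).  The bit value and all names below are
** local to this illustration.
*/
#include <stdio.h>

#define TOY_BITRK     0x100                    /* marker bit: "this operand is a constant" */
#define toy_rkask(k)  ((k) | TOY_BITRK)        /* constant index -> RK operand */
#define toy_isk(x)    ((x) & TOY_BITRK)        /* does the operand name a constant? */
#define toy_indexk(x) ((x) & ~TOY_BITRK)       /* strip the marker bit */

static double toy_constants[] = { 3.14, 2.71 };
static double toy_registers[] = { 0.0, 42.0 };

static double toy_read(int rk) {
  return toy_isk(rk) ? toy_constants[toy_indexk(rk)] : toy_registers[rk];
}

int main(void) {
  printf("%g\n", toy_read(1));                 /* register 1 -> 42 */
  printf("%g\n", toy_read(toy_rkask(0)));      /* constant 0 -> 3.14 */
  return 0;
}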
{ + lua_Number v1, v2, r; + if (!isnumeral(e1) || !isnumeral(e2)) return 0; + v1 = e1->u.nval; + v2 = e2->u.nval; + switch (op) { + case OP_ADD: r = luai_numadd(v1, v2); break; + case OP_SUB: r = luai_numsub(v1, v2); break; + case OP_MUL: r = luai_nummul(v1, v2); break; + case OP_DIV: + if (v2 == 0) return 0; /* do not attempt to divide by 0 */ + r = luai_numdiv(v1, v2); break; + case OP_MOD: + if (v2 == 0) return 0; /* do not attempt to divide by 0 */ + r = luai_nummod(v1, v2); break; + case OP_POW: r = luai_numpow(v1, v2); break; + case OP_UNM: r = luai_numunm(v1); break; + case OP_LEN: return 0; /* no constant folding for 'len' */ + default: lua_assert(0); r = 0; break; + } + if (luai_numisnan(r)) return 0; /* do not attempt to produce NaN */ + e1->u.nval = r; + return 1; +} + + +static void codearith (FuncState *fs, OpCode op, expdesc *e1, expdesc *e2) { + if (constfolding(op, e1, e2)) + return; + else { + int o2 = (op != OP_UNM && op != OP_LEN) ? luaK_exp2RK(fs, e2) : 0; + int o1 = luaK_exp2RK(fs, e1); + if (o1 > o2) { + freeexp(fs, e1); + freeexp(fs, e2); + } + else { + freeexp(fs, e2); + freeexp(fs, e1); + } + e1->u.s.info = luaK_codeABC(fs, op, 0, o1, o2); + e1->k = VRELOCABLE; + } +} + + +static void codecomp (FuncState *fs, OpCode op, int cond, expdesc *e1, + expdesc *e2) { + int o1 = luaK_exp2RK(fs, e1); + int o2 = luaK_exp2RK(fs, e2); + freeexp(fs, e2); + freeexp(fs, e1); + if (cond == 0 && op != OP_EQ) { + int temp; /* exchange args to replace by `<' or `<=' */ + temp = o1; o1 = o2; o2 = temp; /* o1 <==> o2 */ + cond = 1; + } + e1->u.s.info = condjump(fs, op, cond, o1, o2); + e1->k = VJMP; +} + + +void luaK_prefix (FuncState *fs, UnOpr op, expdesc *e) { + expdesc e2; + e2.t = e2.f = NO_JUMP; e2.k = VKNUM; e2.u.nval = 0; + switch (op) { + case OPR_MINUS: { + if (!isnumeral(e)) + luaK_exp2anyreg(fs, e); /* cannot operate on non-numeric constants */ + codearith(fs, OP_UNM, e, &e2); + break; + } + case OPR_NOT: codenot(fs, e); break; + case OPR_LEN: { + luaK_exp2anyreg(fs, e); /* cannot operate on constants */ + codearith(fs, OP_LEN, e, &e2); + break; + } + default: lua_assert(0); + } +} + + +void luaK_infix (FuncState *fs, BinOpr op, expdesc *v) { + switch (op) { + case OPR_AND: { + luaK_goiftrue(fs, v); + break; + } + case OPR_OR: { + luaK_goiffalse(fs, v); + break; + } + case OPR_CONCAT: { + luaK_exp2nextreg(fs, v); /* operand must be on the `stack' */ + break; + } + case OPR_ADD: case OPR_SUB: case OPR_MUL: case OPR_DIV: + case OPR_MOD: case OPR_POW: { + if (!isnumeral(v)) luaK_exp2RK(fs, v); + break; + } + default: { + luaK_exp2RK(fs, v); + break; + } + } +} + + +void luaK_posfix (FuncState *fs, BinOpr op, expdesc *e1, expdesc *e2) { + switch (op) { + case OPR_AND: { + lua_assert(e1->t == NO_JUMP); /* list must be closed */ + luaK_dischargevars(fs, e2); + luaK_concat(fs, &e2->f, e1->f); + *e1 = *e2; + break; + } + case OPR_OR: { + lua_assert(e1->f == NO_JUMP); /* list must be closed */ + luaK_dischargevars(fs, e2); + luaK_concat(fs, &e2->t, e1->t); + *e1 = *e2; + break; + } + case OPR_CONCAT: { + luaK_exp2val(fs, e2); + if (e2->k == VRELOCABLE && GET_OPCODE(getcode(fs, e2)) == OP_CONCAT) { + lua_assert(e1->u.s.info == GETARG_B(getcode(fs, e2))-1); + freeexp(fs, e1); + SETARG_B(getcode(fs, e2), e1->u.s.info); + e1->k = VRELOCABLE; e1->u.s.info = e2->u.s.info; + } + else { + luaK_exp2nextreg(fs, e2); /* operand must be on the 'stack' */ + codearith(fs, OP_CONCAT, e1, e2); + } + break; + } + case OPR_ADD: codearith(fs, OP_ADD, e1, e2); break; + case OPR_SUB: codearith(fs, 
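/*
** A minimal standalone sketch (not part of the upstream source above) of the
** decision rule in constfolding(): fold an arithmetic operator at compile time
** only when both operands are numeric literals, and refuse to fold division or
** modulo by zero or anything that yields NaN, so those cases still surface at
** run time.  Names are illustrative; '%' here uses fmod for brevity, whereas
** Lua's modulo is floor-based.
*/
#include <stdio.h>
#include <math.h>

/* try to fold "a op b"; return 1 and set *out on success, 0 to defer to runtime */
static int toy_fold(char op, double a, double b, double *out) {
  double r;
  switch (op) {
    case '+': r = a + b; break;
    case '-': r = a - b; break;
    case '*': r = a * b; break;
    case '/': if (b == 0) return 0; r = a / b; break;       /* keep the runtime error */
    case '%': if (b == 0) return 0; r = fmod(a, b); break;
    default: return 0;
  }
  if (isnan(r)) return 0;          /* do not bake NaN into the constant table */
  *out = r;
  return 1;
}

int main(void) {
  double r = 0;
  printf("2*3 folds: %d (r=%g)\n", toy_fold('*', 2, 3, &r), r);
  printf("1/0 folds: %d\n", toy_fold('/', 1, 0, &r));
  return 0;
}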
OP_SUB, e1, e2); break; + case OPR_MUL: codearith(fs, OP_MUL, e1, e2); break; + case OPR_DIV: codearith(fs, OP_DIV, e1, e2); break; + case OPR_MOD: codearith(fs, OP_MOD, e1, e2); break; + case OPR_POW: codearith(fs, OP_POW, e1, e2); break; + case OPR_EQ: codecomp(fs, OP_EQ, 1, e1, e2); break; + case OPR_NE: codecomp(fs, OP_EQ, 0, e1, e2); break; + case OPR_LT: codecomp(fs, OP_LT, 1, e1, e2); break; + case OPR_LE: codecomp(fs, OP_LE, 1, e1, e2); break; + case OPR_GT: codecomp(fs, OP_LT, 0, e1, e2); break; + case OPR_GE: codecomp(fs, OP_LE, 0, e1, e2); break; + default: lua_assert(0); + } +} + + +void luaK_fixline (FuncState *fs, int line) { + fs->f->lineinfo[fs->pc - 1] = line; +} + + +static int luaK_code (FuncState *fs, Instruction i, int line) { + Proto *f = fs->f; + dischargejpc(fs); /* `pc' will change */ + /* put new instruction in code array */ + luaM_growvector(fs->L, f->code, fs->pc, f->sizecode, Instruction, + MAX_INT, "code size overflow"); + f->code[fs->pc] = i; + /* save corresponding line information */ + luaM_growvector(fs->L, f->lineinfo, fs->pc, f->sizelineinfo, int, + MAX_INT, "code size overflow"); + f->lineinfo[fs->pc] = line; + return fs->pc++; +} + + +int luaK_codeABC (FuncState *fs, OpCode o, int a, int b, int c) { + lua_assert(getOpMode(o) == iABC); + lua_assert(getBMode(o) != OpArgN || b == 0); + lua_assert(getCMode(o) != OpArgN || c == 0); + return luaK_code(fs, CREATE_ABC(o, a, b, c), fs->ls->lastline); +} + + +int luaK_codeABx (FuncState *fs, OpCode o, int a, unsigned int bc) { + lua_assert(getOpMode(o) == iABx || getOpMode(o) == iAsBx); + lua_assert(getCMode(o) == OpArgN); + return luaK_code(fs, CREATE_ABx(o, a, bc), fs->ls->lastline); +} + + +void luaK_setlist (FuncState *fs, int base, int nelems, int tostore) { + int c = (nelems - 1)/LFIELDS_PER_FLUSH + 1; + int b = (tostore == LUA_MULTRET) ? 0 : tostore; + lua_assert(tostore != 0); + if (c <= MAXARG_C) + luaK_codeABC(fs, OP_SETLIST, base, b, c); + else { + luaK_codeABC(fs, OP_SETLIST, base, b, 0); + luaK_code(fs, cast(Instruction, c), fs->ls->lastline); + } + fs->freereg = base + 1; /* free registers with list values */ +} + diff --git a/deps/lua/src/lcode.h b/deps/lua/src/lcode.h new file mode 100644 index 0000000..b941c60 --- /dev/null +++ b/deps/lua/src/lcode.h @@ -0,0 +1,76 @@ +/* +** $Id: lcode.h,v 1.48.1.1 2007/12/27 13:02:25 roberto Exp $ +** Code generator for Lua +** See Copyright Notice in lua.h +*/ + +#ifndef lcode_h +#define lcode_h + +#include "llex.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" + + +/* +** Marks the end of a patch list. It is an invalid value both as an absolute +** address, and as a list link (would link an element to itself). 
+*/ +#define NO_JUMP (-1) + + +/* +** grep "ORDER OPR" if you change these enums +*/ +typedef enum BinOpr { + OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, + OPR_CONCAT, + OPR_NE, OPR_EQ, + OPR_LT, OPR_LE, OPR_GT, OPR_GE, + OPR_AND, OPR_OR, + OPR_NOBINOPR +} BinOpr; + + +typedef enum UnOpr { OPR_MINUS, OPR_NOT, OPR_LEN, OPR_NOUNOPR } UnOpr; + + +#define getcode(fs,e) ((fs)->f->code[(e)->u.s.info]) + +#define luaK_codeAsBx(fs,o,A,sBx) luaK_codeABx(fs,o,A,(sBx)+MAXARG_sBx) + +#define luaK_setmultret(fs,e) luaK_setreturns(fs, e, LUA_MULTRET) + +LUAI_FUNC int luaK_codeABx (FuncState *fs, OpCode o, int A, unsigned int Bx); +LUAI_FUNC int luaK_codeABC (FuncState *fs, OpCode o, int A, int B, int C); +LUAI_FUNC void luaK_fixline (FuncState *fs, int line); +LUAI_FUNC void luaK_nil (FuncState *fs, int from, int n); +LUAI_FUNC void luaK_reserveregs (FuncState *fs, int n); +LUAI_FUNC void luaK_checkstack (FuncState *fs, int n); +LUAI_FUNC int luaK_stringK (FuncState *fs, TString *s); +LUAI_FUNC int luaK_numberK (FuncState *fs, lua_Number r); +LUAI_FUNC void luaK_dischargevars (FuncState *fs, expdesc *e); +LUAI_FUNC int luaK_exp2anyreg (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_exp2nextreg (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_exp2val (FuncState *fs, expdesc *e); +LUAI_FUNC int luaK_exp2RK (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_self (FuncState *fs, expdesc *e, expdesc *key); +LUAI_FUNC void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k); +LUAI_FUNC void luaK_goiftrue (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_storevar (FuncState *fs, expdesc *var, expdesc *e); +LUAI_FUNC void luaK_setreturns (FuncState *fs, expdesc *e, int nresults); +LUAI_FUNC void luaK_setoneret (FuncState *fs, expdesc *e); +LUAI_FUNC int luaK_jump (FuncState *fs); +LUAI_FUNC void luaK_ret (FuncState *fs, int first, int nret); +LUAI_FUNC void luaK_patchlist (FuncState *fs, int list, int target); +LUAI_FUNC void luaK_patchtohere (FuncState *fs, int list); +LUAI_FUNC void luaK_concat (FuncState *fs, int *l1, int l2); +LUAI_FUNC int luaK_getlabel (FuncState *fs); +LUAI_FUNC void luaK_prefix (FuncState *fs, UnOpr op, expdesc *v); +LUAI_FUNC void luaK_infix (FuncState *fs, BinOpr op, expdesc *v); +LUAI_FUNC void luaK_posfix (FuncState *fs, BinOpr op, expdesc *v1, expdesc *v2); +LUAI_FUNC void luaK_setlist (FuncState *fs, int base, int nelems, int tostore); + + +#endif diff --git a/deps/lua/src/ldblib.c b/deps/lua/src/ldblib.c new file mode 100644 index 0000000..2027eda --- /dev/null +++ b/deps/lua/src/ldblib.c @@ -0,0 +1,398 @@ +/* +** $Id: ldblib.c,v 1.104.1.4 2009/08/04 18:50:18 roberto Exp $ +** Interface from Lua to its debug API +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define ldblib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +static int db_getregistry (lua_State *L) { + lua_pushvalue(L, LUA_REGISTRYINDEX); + return 1; +} + + +static int db_getmetatable (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + lua_pushnil(L); /* no metatable */ + } + return 1; +} + + +static int db_setmetatable (lua_State *L) { + int t = lua_type(L, 2); + luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2, + "nil or table expected"); + lua_settop(L, 2); + lua_pushboolean(L, lua_setmetatable(L, 1)); + return 1; +} + + +static int db_getfenv (lua_State *L) { + luaL_checkany(L, 1); + lua_getfenv(L, 1); + return 1; +} + + +static int db_setfenv (lua_State *L) { + luaL_checktype(L, 2, LUA_TTABLE); + 
lua_settop(L, 2); + if (lua_setfenv(L, 1) == 0) + luaL_error(L, LUA_QL("setfenv") + " cannot change environment of given object"); + return 1; +} + + +static void settabss (lua_State *L, const char *i, const char *v) { + lua_pushstring(L, v); + lua_setfield(L, -2, i); +} + + +static void settabsi (lua_State *L, const char *i, int v) { + lua_pushinteger(L, v); + lua_setfield(L, -2, i); +} + + +static lua_State *getthread (lua_State *L, int *arg) { + if (lua_isthread(L, 1)) { + *arg = 1; + return lua_tothread(L, 1); + } + else { + *arg = 0; + return L; + } +} + + +static void treatstackoption (lua_State *L, lua_State *L1, const char *fname) { + if (L == L1) { + lua_pushvalue(L, -2); + lua_remove(L, -3); + } + else + lua_xmove(L1, L, 1); + lua_setfield(L, -2, fname); +} + + +static int db_getinfo (lua_State *L) { + lua_Debug ar; + int arg; + lua_State *L1 = getthread(L, &arg); + const char *options = luaL_optstring(L, arg+2, "flnSu"); + if (lua_isnumber(L, arg+1)) { + if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) { + lua_pushnil(L); /* level out of range */ + return 1; + } + } + else if (lua_isfunction(L, arg+1)) { + lua_pushfstring(L, ">%s", options); + options = lua_tostring(L, -1); + lua_pushvalue(L, arg+1); + lua_xmove(L, L1, 1); + } + else + return luaL_argerror(L, arg+1, "function or level expected"); + if (!lua_getinfo(L1, options, &ar)) + return luaL_argerror(L, arg+2, "invalid option"); + lua_createtable(L, 0, 2); + if (strchr(options, 'S')) { + settabss(L, "source", ar.source); + settabss(L, "short_src", ar.short_src); + settabsi(L, "linedefined", ar.linedefined); + settabsi(L, "lastlinedefined", ar.lastlinedefined); + settabss(L, "what", ar.what); + } + if (strchr(options, 'l')) + settabsi(L, "currentline", ar.currentline); + if (strchr(options, 'u')) + settabsi(L, "nups", ar.nups); + if (strchr(options, 'n')) { + settabss(L, "name", ar.name); + settabss(L, "namewhat", ar.namewhat); + } + if (strchr(options, 'L')) + treatstackoption(L, L1, "activelines"); + if (strchr(options, 'f')) + treatstackoption(L, L1, "func"); + return 1; /* return table */ +} + + +static int db_getlocal (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + const char *name; + if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar)) /* out of range? */ + return luaL_argerror(L, arg+1, "level out of range"); + name = lua_getlocal(L1, &ar, luaL_checkint(L, arg+2)); + if (name) { + lua_xmove(L1, L, 1); + lua_pushstring(L, name); + lua_pushvalue(L, -2); + return 2; + } + else { + lua_pushnil(L); + return 1; + } +} + + +static int db_setlocal (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar)) /* out of range? */ + return luaL_argerror(L, arg+1, "level out of range"); + luaL_checkany(L, arg+3); + lua_settop(L, arg+3); + lua_xmove(L, L1, 1); + lua_pushstring(L, lua_setlocal(L1, &ar, luaL_checkint(L, arg+2))); + return 1; +} + + +static int auxupvalue (lua_State *L, int get) { + const char *name; + int n = luaL_checkint(L, 2); + luaL_checktype(L, 1, LUA_TFUNCTION); + if (lua_iscfunction(L, 1)) return 0; /* cannot touch C upvalues from Lua */ + name = get ? 
lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n); + if (name == NULL) return 0; + lua_pushstring(L, name); + lua_insert(L, -(get+1)); + return get + 1; +} + + +static int db_getupvalue (lua_State *L) { + return auxupvalue(L, 1); +} + + +static int db_setupvalue (lua_State *L) { + luaL_checkany(L, 3); + return auxupvalue(L, 0); +} + + + +static const char KEY_HOOK = 'h'; + + +static void hookf (lua_State *L, lua_Debug *ar) { + static const char *const hooknames[] = + {"call", "return", "line", "count", "tail return"}; + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + lua_pushlightuserdata(L, L); + lua_rawget(L, -2); + if (lua_isfunction(L, -1)) { + lua_pushstring(L, hooknames[(int)ar->event]); + if (ar->currentline >= 0) + lua_pushinteger(L, ar->currentline); + else lua_pushnil(L); + lua_assert(lua_getinfo(L, "lS", ar)); + lua_call(L, 2, 0); + } +} + + +static int makemask (const char *smask, int count) { + int mask = 0; + if (strchr(smask, 'c')) mask |= LUA_MASKCALL; + if (strchr(smask, 'r')) mask |= LUA_MASKRET; + if (strchr(smask, 'l')) mask |= LUA_MASKLINE; + if (count > 0) mask |= LUA_MASKCOUNT; + return mask; +} + + +static char *unmakemask (int mask, char *smask) { + int i = 0; + if (mask & LUA_MASKCALL) smask[i++] = 'c'; + if (mask & LUA_MASKRET) smask[i++] = 'r'; + if (mask & LUA_MASKLINE) smask[i++] = 'l'; + smask[i] = '\0'; + return smask; +} + + +static void gethooktable (lua_State *L) { + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + if (!lua_istable(L, -1)) { + lua_pop(L, 1); + lua_createtable(L, 0, 1); + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_pushvalue(L, -2); + lua_rawset(L, LUA_REGISTRYINDEX); + } +} + + +static int db_sethook (lua_State *L) { + int arg, mask, count; + lua_Hook func; + lua_State *L1 = getthread(L, &arg); + if (lua_isnoneornil(L, arg+1)) { + lua_settop(L, arg+1); + func = NULL; mask = 0; count = 0; /* turn off hooks */ + } + else { + const char *smask = luaL_checkstring(L, arg+2); + luaL_checktype(L, arg+1, LUA_TFUNCTION); + count = luaL_optint(L, arg+3, 0); + func = hookf; mask = makemask(smask, count); + } + gethooktable(L); + lua_pushlightuserdata(L, L1); + lua_pushvalue(L, arg+1); + lua_rawset(L, -3); /* set new hook */ + lua_pop(L, 1); /* remove hook table */ + lua_sethook(L1, func, mask, count); /* set hooks */ + return 0; +} + + +static int db_gethook (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + char buff[5]; + int mask = lua_gethookmask(L1); + lua_Hook hook = lua_gethook(L1); + if (hook != NULL && hook != hookf) /* external hook? 
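/*
** A minimal host-side sketch (not part of the upstream source above): the
** db_sethook()/hookf() pair is the Lua-visible wrapper around the same
** lua_sethook() C API, which a host program can also use directly.  This
** example counts line events for an arbitrary chunk; it assumes a Lua 5.1
** installation and the chunk string is made up.
*/
#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

static int lines_seen = 0;

static void count_lines(lua_State *L, lua_Debug *ar) {
  (void)L;
  if (ar->event == LUA_HOOKLINE) lines_seen++;   /* one event per new source line */
}

int main(void) {
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);
  lua_sethook(L, count_lines, LUA_MASKLINE, 0);  /* fire the hook on every line */
  luaL_dostring(L, "local x = 1\nx = x + 1\nreturn x");
  printf("executed %d line events\n", lines_seen);
  lua_close(L);
  return 0;
}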
*/ + lua_pushliteral(L, "external hook"); + else { + gethooktable(L); + lua_pushlightuserdata(L, L1); + lua_rawget(L, -2); /* get hook */ + lua_remove(L, -2); /* remove hook table */ + } + lua_pushstring(L, unmakemask(mask, buff)); + lua_pushinteger(L, lua_gethookcount(L1)); + return 3; +} + + +static int db_debug (lua_State *L) { + for (;;) { + char buffer[250]; + fputs("lua_debug> ", stderr); + if (fgets(buffer, sizeof(buffer), stdin) == 0 || + strcmp(buffer, "cont\n") == 0) + return 0; + if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") || + lua_pcall(L, 0, 0, 0)) { + fputs(lua_tostring(L, -1), stderr); + fputs("\n", stderr); + } + lua_settop(L, 0); /* remove eventual returns */ + } +} + + +#define LEVELS1 12 /* size of the first part of the stack */ +#define LEVELS2 10 /* size of the second part of the stack */ + +static int db_errorfb (lua_State *L) { + int level; + int firstpart = 1; /* still before eventual `...' */ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + if (lua_isnumber(L, arg+2)) { + level = (int)lua_tointeger(L, arg+2); + lua_pop(L, 1); + } + else + level = (L == L1) ? 1 : 0; /* level 0 may be this own function */ + if (lua_gettop(L) == arg) + lua_pushliteral(L, ""); + else if (!lua_isstring(L, arg+1)) return 1; /* message is not a string */ + else lua_pushliteral(L, "\n"); + lua_pushliteral(L, "stack traceback:"); + while (lua_getstack(L1, level++, &ar)) { + if (level > LEVELS1 && firstpart) { + /* no more than `LEVELS2' more levels? */ + if (!lua_getstack(L1, level+LEVELS2, &ar)) + level--; /* keep going */ + else { + lua_pushliteral(L, "\n\t..."); /* too many levels */ + while (lua_getstack(L1, level+LEVELS2, &ar)) /* find last levels */ + level++; + } + firstpart = 0; + continue; + } + lua_pushliteral(L, "\n\t"); + lua_getinfo(L1, "Snl", &ar); + lua_pushfstring(L, "%s:", ar.short_src); + if (ar.currentline > 0) + lua_pushfstring(L, "%d:", ar.currentline); + if (*ar.namewhat != '\0') /* is there a name? */ + lua_pushfstring(L, " in function " LUA_QS, ar.name); + else { + if (*ar.what == 'm') /* main? 
*/ + lua_pushfstring(L, " in main chunk"); + else if (*ar.what == 'C' || *ar.what == 't') + lua_pushliteral(L, " ?"); /* C function or tail call */ + else + lua_pushfstring(L, " in function <%s:%d>", + ar.short_src, ar.linedefined); + } + lua_concat(L, lua_gettop(L) - arg); + } + lua_concat(L, lua_gettop(L) - arg); + return 1; +} + + +static const luaL_Reg dblib[] = { + {"debug", db_debug}, + {"getfenv", db_getfenv}, + {"gethook", db_gethook}, + {"getinfo", db_getinfo}, + {"getlocal", db_getlocal}, + {"getregistry", db_getregistry}, + {"getmetatable", db_getmetatable}, + {"getupvalue", db_getupvalue}, + {"setfenv", db_setfenv}, + {"sethook", db_sethook}, + {"setlocal", db_setlocal}, + {"setmetatable", db_setmetatable}, + {"setupvalue", db_setupvalue}, + {"traceback", db_errorfb}, + {NULL, NULL} +}; + + +LUALIB_API int luaopen_debug (lua_State *L) { + luaL_register(L, LUA_DBLIBNAME, dblib); + return 1; +} + diff --git a/deps/lua/src/ldebug.c b/deps/lua/src/ldebug.c new file mode 100644 index 0000000..4940e65 --- /dev/null +++ b/deps/lua/src/ldebug.c @@ -0,0 +1,637 @@ +/* +** $Id: ldebug.c,v 2.29.1.6 2008/05/08 16:56:26 roberto Exp $ +** Debug Interface +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + + +#define ldebug_c +#define LUA_CORE + +#include "lua.h" + +#include "lapi.h" +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lvm.h" + + + +static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name); + + +static int currentpc (lua_State *L, CallInfo *ci) { + if (!isLua(ci)) return -1; /* function is not a Lua function? */ + if (ci == L->ci) + ci->savedpc = L->savedpc; + return pcRel(ci->savedpc, ci_func(ci)->l.p); +} + + +static int currentline (lua_State *L, CallInfo *ci) { + int pc = currentpc(L, ci); + if (pc < 0) + return -1; /* only active lua functions have current-line information */ + else + return getline(ci_func(ci)->l.p, pc); +} + + +/* +** this function can be called asynchronous (e.g. during a signal) +*/ +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count) { + if (func == NULL || mask == 0) { /* turn off hooks? */ + mask = 0; + func = NULL; + } + L->hook = func; + L->basehookcount = count; + resethookcount(L); + L->hookmask = cast_byte(mask); + return 1; +} + + +LUA_API lua_Hook lua_gethook (lua_State *L) { + return L->hook; +} + + +LUA_API int lua_gethookmask (lua_State *L) { + return L->hookmask; +} + + +LUA_API int lua_gethookcount (lua_State *L) { + return L->basehookcount; +} + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) { + int status; + CallInfo *ci; + lua_lock(L); + for (ci = L->ci; level > 0 && ci > L->base_ci; ci--) { + level--; + if (f_isLua(ci)) /* Lua function? */ + level -= ci->tailcalls; /* skip lost tail calls */ + } + if (level == 0 && ci > L->base_ci) { /* level found? */ + status = 1; + ar->i_ci = cast_int(ci - L->base_ci); + } + else if (level < 0) { /* level is of a lost tail call? */ + status = 1; + ar->i_ci = 0; + } + else status = 0; /* no such level */ + lua_unlock(L); + return status; +} + + +static Proto *getluaproto (CallInfo *ci) { + return (isLua(ci) ? 
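/*
** A minimal host-side sketch (not part of the upstream source above):
** db_errorfb() builds debug.traceback() by walking stack levels with
** lua_getstack()/lua_getinfo(); a C host can do the same directly.  This
** helper assumes a Lua 5.1 installation and is meant to be called from a C
** function or hook while Lua code is on the stack.
*/
#include <stdio.h>
#include <lua.h>

static void dump_stack(lua_State *L) {
  lua_Debug ar;
  int level = 0;
  while (lua_getstack(L, level, &ar)) {   /* level 0 is the running function */
    lua_getinfo(L, "Snl", &ar);           /* fill in source, name and line information */
    printf("#%d %s:%d in %s %s\n", level, ar.short_src, ar.currentline,
           *ar.namewhat ? ar.namewhat : "?", ar.name ? ar.name : "?");
    level++;
  }
}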
ci_func(ci)->l.p : NULL); +} + + +static const char *findlocal (lua_State *L, CallInfo *ci, int n) { + const char *name; + Proto *fp = getluaproto(ci); + if (fp && (name = luaF_getlocalname(fp, n, currentpc(L, ci))) != NULL) + return name; /* is a local variable in a Lua function */ + else { + StkId limit = (ci == L->ci) ? L->top : (ci+1)->func; + if (limit - ci->base >= n && n > 0) /* is 'n' inside 'ci' stack? */ + return "(*temporary)"; + else + return NULL; + } +} + + +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) { + CallInfo *ci = L->base_ci + ar->i_ci; + const char *name = findlocal(L, ci, n); + lua_lock(L); + if (name) + luaA_pushobject(L, ci->base + (n - 1)); + lua_unlock(L); + return name; +} + + +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) { + CallInfo *ci = L->base_ci + ar->i_ci; + const char *name = findlocal(L, ci, n); + lua_lock(L); + if (name) + setobjs2s(L, ci->base + (n - 1), L->top - 1); + L->top--; /* pop value */ + lua_unlock(L); + return name; +} + + +static void funcinfo (lua_Debug *ar, Closure *cl) { + if (cl->c.isC) { + ar->source = "=[C]"; + ar->linedefined = -1; + ar->lastlinedefined = -1; + ar->what = "C"; + } + else { + ar->source = getstr(cl->l.p->source); + ar->linedefined = cl->l.p->linedefined; + ar->lastlinedefined = cl->l.p->lastlinedefined; + ar->what = (ar->linedefined == 0) ? "main" : "Lua"; + } + luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE); +} + + +static void info_tailcall (lua_Debug *ar) { + ar->name = ar->namewhat = ""; + ar->what = "tail"; + ar->lastlinedefined = ar->linedefined = ar->currentline = -1; + ar->source = "=(tail call)"; + luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE); + ar->nups = 0; +} + + +static void collectvalidlines (lua_State *L, Closure *f) { + if (f == NULL || f->c.isC) { + setnilvalue(L->top); + } + else { + Table *t = luaH_new(L, 0, 0); + int *lineinfo = f->l.p->lineinfo; + int i; + for (i=0; il.p->sizelineinfo; i++) + setbvalue(luaH_setnum(L, t, lineinfo[i]), 1); + sethvalue(L, L->top, t); + } + incr_top(L); +} + + +static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar, + Closure *f, CallInfo *ci) { + int status = 1; + if (f == NULL) { + info_tailcall(ar); + return status; + } + for (; *what; what++) { + switch (*what) { + case 'S': { + funcinfo(ar, f); + break; + } + case 'l': { + ar->currentline = (ci) ? currentline(L, ci) : -1; + break; + } + case 'u': { + ar->nups = f->c.nupvalues; + break; + } + case 'n': { + ar->namewhat = (ci) ? getfuncname(L, ci, &ar->name) : NULL; + if (ar->namewhat == NULL) { + ar->namewhat = ""; /* not found */ + ar->name = NULL; + } + break; + } + case 'L': + case 'f': /* handled by lua_getinfo */ + break; + default: status = 0; /* invalid option */ + } + } + return status; +} + + +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) { + int status; + Closure *f = NULL; + CallInfo *ci = NULL; + lua_lock(L); + if (*what == '>') { + StkId func = L->top - 1; + luai_apicheck(L, ttisfunction(func)); + what++; /* skip the '>' */ + f = clvalue(func); + L->top--; /* pop function */ + } + else if (ar->i_ci != 0) { /* no tail call? 
*/ + ci = L->base_ci + ar->i_ci; + lua_assert(ttisfunction(ci->func)); + f = clvalue(ci->func); + } + status = auxgetinfo(L, what, ar, f, ci); + if (strchr(what, 'f')) { + if (f == NULL) setnilvalue(L->top); + else setclvalue(L, L->top, f); + incr_top(L); + } + if (strchr(what, 'L')) + collectvalidlines(L, f); + lua_unlock(L); + return status; +} + + +/* +** {====================================================== +** Symbolic Execution and code checker +** ======================================================= +*/ + +#define check(x) if (!(x)) return 0; + +#define checkjump(pt,pc) check(0 <= pc && pc < pt->sizecode) + +#define checkreg(pt,reg) check((reg) < (pt)->maxstacksize) + + + +static int precheck (const Proto *pt) { + check(pt->maxstacksize <= MAXSTACK); + check(pt->numparams+(pt->is_vararg & VARARG_HASARG) <= pt->maxstacksize); + check(!(pt->is_vararg & VARARG_NEEDSARG) || + (pt->is_vararg & VARARG_HASARG)); + check(pt->sizeupvalues <= pt->nups); + check(pt->sizelineinfo == pt->sizecode || pt->sizelineinfo == 0); + check(pt->sizecode > 0 && GET_OPCODE(pt->code[pt->sizecode-1]) == OP_RETURN); + return 1; +} + + +#define checkopenop(pt,pc) luaG_checkopenop((pt)->code[(pc)+1]) + +int luaG_checkopenop (Instruction i) { + switch (GET_OPCODE(i)) { + case OP_CALL: + case OP_TAILCALL: + case OP_RETURN: + case OP_SETLIST: { + check(GETARG_B(i) == 0); + return 1; + } + default: return 0; /* invalid instruction after an open call */ + } +} + + +static int checkArgMode (const Proto *pt, int r, enum OpArgMask mode) { + switch (mode) { + case OpArgN: check(r == 0); break; + case OpArgU: break; + case OpArgR: checkreg(pt, r); break; + case OpArgK: + check(ISK(r) ? INDEXK(r) < pt->sizek : r < pt->maxstacksize); + break; + } + return 1; +} + + +static Instruction symbexec (const Proto *pt, int lastpc, int reg) { + int pc; + int last; /* stores position of last instruction that changed `reg' */ + last = pt->sizecode-1; /* points to final return (a `neutral' instruction) */ + check(precheck(pt)); + for (pc = 0; pc < lastpc; pc++) { + Instruction i = pt->code[pc]; + OpCode op = GET_OPCODE(i); + int a = GETARG_A(i); + int b = 0; + int c = 0; + check(op < NUM_OPCODES); + checkreg(pt, a); + switch (getOpMode(op)) { + case iABC: { + b = GETARG_B(i); + c = GETARG_C(i); + check(checkArgMode(pt, b, getBMode(op))); + check(checkArgMode(pt, c, getCMode(op))); + break; + } + case iABx: { + b = GETARG_Bx(i); + if (getBMode(op) == OpArgK) check(b < pt->sizek); + break; + } + case iAsBx: { + b = GETARG_sBx(i); + if (getBMode(op) == OpArgR) { + int dest = pc+1+b; + check(0 <= dest && dest < pt->sizecode); + if (dest > 0) { + int j; + /* check that it does not jump to a setlist count; this + is tricky, because the count from a previous setlist may + have the same value of an invalid setlist; so, we must + go all the way back to the first of them (if any) */ + for (j = 0; j < dest; j++) { + Instruction d = pt->code[dest-1-j]; + if (!(GET_OPCODE(d) == OP_SETLIST && GETARG_C(d) == 0)) break; + } + /* if 'j' is even, previous value is not a setlist (even if + it looks like one) */ + check((j&1) == 0); + } + } + break; + } + } + if (testAMode(op)) { + if (a == reg) last = pc; /* change register `a' */ + } + if (testTMode(op)) { + check(pc+2 < pt->sizecode); /* check skip */ + check(GET_OPCODE(pt->code[pc+1]) == OP_JMP); + } + switch (op) { + case OP_LOADBOOL: { + if (c == 1) { /* does it jump? 
*/ + check(pc+2 < pt->sizecode); /* check its jump */ + check(GET_OPCODE(pt->code[pc+1]) != OP_SETLIST || + GETARG_C(pt->code[pc+1]) != 0); + } + break; + } + case OP_LOADNIL: { + if (a <= reg && reg <= b) + last = pc; /* set registers from `a' to `b' */ + break; + } + case OP_GETUPVAL: + case OP_SETUPVAL: { + check(b < pt->nups); + break; + } + case OP_GETGLOBAL: + case OP_SETGLOBAL: { + check(ttisstring(&pt->k[b])); + break; + } + case OP_SELF: { + checkreg(pt, a+1); + if (reg == a+1) last = pc; + break; + } + case OP_CONCAT: { + check(b < c); /* at least two operands */ + break; + } + case OP_TFORLOOP: { + check(c >= 1); /* at least one result (control variable) */ + checkreg(pt, a+2+c); /* space for results */ + if (reg >= a+2) last = pc; /* affect all regs above its base */ + break; + } + case OP_FORLOOP: + case OP_FORPREP: + checkreg(pt, a+3); + /* go through */ + case OP_JMP: { + int dest = pc+1+b; + /* not full check and jump is forward and do not skip `lastpc'? */ + if (reg != NO_REG && pc < dest && dest <= lastpc) + pc += b; /* do the jump */ + break; + } + case OP_CALL: + case OP_TAILCALL: { + if (b != 0) { + checkreg(pt, a+b-1); + } + c--; /* c = num. returns */ + if (c == LUA_MULTRET) { + check(checkopenop(pt, pc)); + } + else if (c != 0) + checkreg(pt, a+c-1); + if (reg >= a) last = pc; /* affect all registers above base */ + break; + } + case OP_RETURN: { + b--; /* b = num. returns */ + if (b > 0) checkreg(pt, a+b-1); + break; + } + case OP_SETLIST: { + if (b > 0) checkreg(pt, a + b); + if (c == 0) { + pc++; + check(pc < pt->sizecode - 1); + } + break; + } + case OP_CLOSURE: { + int nup, j; + check(b < pt->sizep); + nup = pt->p[b]->nups; + check(pc + nup < pt->sizecode); + for (j = 1; j <= nup; j++) { + OpCode op1 = GET_OPCODE(pt->code[pc + j]); + check(op1 == OP_GETUPVAL || op1 == OP_MOVE); + } + if (reg != NO_REG) /* tracing? */ + pc += nup; /* do not 'execute' these pseudo-instructions */ + break; + } + case OP_VARARG: { + check((pt->is_vararg & VARARG_ISVARARG) && + !(pt->is_vararg & VARARG_NEEDSARG)); + b--; + if (b == LUA_MULTRET) check(checkopenop(pt, pc)); + checkreg(pt, a+b-1); + break; + } + default: break; + } + } + return pt->code[last]; +} + +#undef check +#undef checkjump +#undef checkreg + +/* }====================================================== */ + + +int luaG_checkcode (const Proto *pt) { + return (symbexec(pt, pt->sizecode, NO_REG) != 0); +} + + +static const char *kname (Proto *p, int c) { + if (ISK(c) && ttisstring(&p->k[INDEXK(c)])) + return svalue(&p->k[INDEXK(c)]); + else + return "?"; +} + + +static const char *getobjname (lua_State *L, CallInfo *ci, int stackpos, + const char **name) { + if (isLua(ci)) { /* a Lua function? */ + Proto *p = ci_func(ci)->l.p; + int pc = currentpc(L, ci); + Instruction i; + *name = luaF_getlocalname(p, stackpos+1, pc); + if (*name) /* is a local? */ + return "local"; + i = symbexec(p, pc, stackpos); /* try symbolic execution */ + lua_assert(pc != -1); + switch (GET_OPCODE(i)) { + case OP_GETGLOBAL: { + int g = GETARG_Bx(i); /* global index */ + lua_assert(ttisstring(&p->k[g])); + *name = svalue(&p->k[g]); + return "global"; + } + case OP_MOVE: { + int a = GETARG_A(i); + int b = GETARG_B(i); /* move from `b' to `a' */ + if (b < a) + return getobjname(L, ci, b, name); /* get name for `b' */ + break; + } + case OP_GETTABLE: { + int k = GETARG_C(i); /* key index */ + *name = kname(p, k); + return "field"; + } + case OP_GETUPVAL: { + int u = GETARG_B(i); /* upvalue index */ + *name = p->upvalues ? 
getstr(p->upvalues[u]) : "?"; + return "upvalue"; + } + case OP_SELF: { + int k = GETARG_C(i); /* key index */ + *name = kname(p, k); + return "method"; + } + default: break; + } + } + return NULL; /* no useful name found */ +} + + +static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { + Instruction i; + if ((isLua(ci) && ci->tailcalls > 0) || !isLua(ci - 1)) + return NULL; /* calling function is not Lua (or is unknown) */ + ci--; /* calling function */ + i = ci_func(ci)->l.p->code[currentpc(L, ci)]; + if (GET_OPCODE(i) == OP_CALL || GET_OPCODE(i) == OP_TAILCALL || + GET_OPCODE(i) == OP_TFORLOOP) + return getobjname(L, ci, GETARG_A(i), name); + else + return NULL; /* no useful name can be found */ +} + + +/* only ANSI way to check whether a pointer points to an array */ +static int isinstack (CallInfo *ci, const TValue *o) { + StkId p; + for (p = ci->base; p < ci->top; p++) + if (o == p) return 1; + return 0; +} + + +void luaG_typeerror (lua_State *L, const TValue *o, const char *op) { + const char *name = NULL; + const char *t = luaT_typenames[ttype(o)]; + const char *kind = (isinstack(L->ci, o)) ? + getobjname(L, L->ci, cast_int(o - L->base), &name) : + NULL; + if (kind) + luaG_runerror(L, "attempt to %s %s " LUA_QS " (a %s value)", + op, kind, name, t); + else + luaG_runerror(L, "attempt to %s a %s value", op, t); +} + + +void luaG_concaterror (lua_State *L, StkId p1, StkId p2) { + if (ttisstring(p1) || ttisnumber(p1)) p1 = p2; + lua_assert(!ttisstring(p1) && !ttisnumber(p1)); + luaG_typeerror(L, p1, "concatenate"); +} + + +void luaG_aritherror (lua_State *L, const TValue *p1, const TValue *p2) { + TValue temp; + if (luaV_tonumber(p1, &temp) == NULL) + p2 = p1; /* first operand is wrong */ + luaG_typeerror(L, p2, "perform arithmetic on"); +} + + +int luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) { + const char *t1 = luaT_typenames[ttype(p1)]; + const char *t2 = luaT_typenames[ttype(p2)]; + if (t1[2] == t2[2]) + luaG_runerror(L, "attempt to compare two %s values", t1); + else + luaG_runerror(L, "attempt to compare %s with %s", t1, t2); + return 0; +} + + +static void addinfo (lua_State *L, const char *msg) { + CallInfo *ci = L->ci; + if (isLua(ci)) { /* is Lua code? */ + char buff[LUA_IDSIZE]; /* add file:line information */ + int line = currentline(L, ci); + luaO_chunkid(buff, getstr(getluaproto(ci)->source), LUA_IDSIZE); + luaO_pushfstring(L, "%s:%d: %s", buff, line, msg); + } +} + + +void luaG_errormsg (lua_State *L) { + if (L->errfunc != 0) { /* is there an error handling function? */ + StkId errfunc = restorestack(L, L->errfunc); + if (!ttisfunction(errfunc)) luaD_throw(L, LUA_ERRERR); + setobjs2s(L, L->top, L->top - 1); /* move argument */ + setobjs2s(L, L->top - 1, errfunc); /* push function */ + incr_top(L); + luaD_call(L, L->top - 2, 1); /* call it */ + } + luaD_throw(L, LUA_ERRRUN); +} + + +void luaG_runerror (lua_State *L, const char *fmt, ...) 
{ + va_list argp; + va_start(argp, fmt); + addinfo(L, luaO_pushvfstring(L, fmt, argp)); + va_end(argp); + luaG_errormsg(L); +} + diff --git a/deps/lua/src/ldebug.h b/deps/lua/src/ldebug.h new file mode 100644 index 0000000..ba28a97 --- /dev/null +++ b/deps/lua/src/ldebug.h @@ -0,0 +1,33 @@ +/* +** $Id: ldebug.h,v 2.3.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions from Debug Interface module +** See Copyright Notice in lua.h +*/ + +#ifndef ldebug_h +#define ldebug_h + + +#include "lstate.h" + + +#define pcRel(pc, p) (cast(int, (pc) - (p)->code) - 1) + +#define getline(f,pc) (((f)->lineinfo) ? (f)->lineinfo[pc] : 0) + +#define resethookcount(L) (L->hookcount = L->basehookcount) + + +LUAI_FUNC void luaG_typeerror (lua_State *L, const TValue *o, + const char *opname); +LUAI_FUNC void luaG_concaterror (lua_State *L, StkId p1, StkId p2); +LUAI_FUNC void luaG_aritherror (lua_State *L, const TValue *p1, + const TValue *p2); +LUAI_FUNC int luaG_ordererror (lua_State *L, const TValue *p1, + const TValue *p2); +LUAI_FUNC void luaG_runerror (lua_State *L, const char *fmt, ...); +LUAI_FUNC void luaG_errormsg (lua_State *L); +LUAI_FUNC int luaG_checkcode (const Proto *pt); +LUAI_FUNC int luaG_checkopenop (Instruction i); + +#endif diff --git a/deps/lua/src/ldo.c b/deps/lua/src/ldo.c new file mode 100644 index 0000000..e641dfc --- /dev/null +++ b/deps/lua/src/ldo.c @@ -0,0 +1,519 @@ +/* +** $Id: ldo.c,v 2.38.1.4 2012/01/18 02:27:10 roberto Exp $ +** Stack and Call structure of Lua +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define ldo_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lundump.h" +#include "lvm.h" +#include "lzio.h" + + + + +/* +** {====================================================== +** Error-recovery functions +** ======================================================= +*/ + + +/* chain list of long jump buffers */ +struct lua_longjmp { + struct lua_longjmp *previous; + luai_jmpbuf b; + volatile int status; /* error code */ +}; + + +void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { + switch (errcode) { + case LUA_ERRMEM: { + setsvalue2s(L, oldtop, luaS_newliteral(L, MEMERRMSG)); + break; + } + case LUA_ERRERR: { + setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); + break; + } + case LUA_ERRSYNTAX: + case LUA_ERRRUN: { + setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ + break; + } + } + L->top = oldtop + 1; +} + + +static void restore_stack_limit (lua_State *L) { + lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK - 1); + if (L->size_ci > LUAI_MAXCALLS) { /* there was an overflow? */ + int inuse = cast_int(L->ci - L->base_ci); + if (inuse + 1 < LUAI_MAXCALLS) /* can `undo' overflow? 
*/ + luaD_reallocCI(L, LUAI_MAXCALLS); + } +} + + +static void resetstack (lua_State *L, int status) { + L->ci = L->base_ci; + L->base = L->ci->base; + luaF_close(L, L->base); /* close eventual pending closures */ + luaD_seterrorobj(L, status, L->base); + L->nCcalls = L->baseCcalls; + L->allowhook = 1; + restore_stack_limit(L); + L->errfunc = 0; + L->errorJmp = NULL; +} + + +void luaD_throw (lua_State *L, int errcode) { + if (L->errorJmp) { + L->errorJmp->status = errcode; + LUAI_THROW(L, L->errorJmp); + } + else { + L->status = cast_byte(errcode); + if (G(L)->panic) { + resetstack(L, errcode); + lua_unlock(L); + G(L)->panic(L); + } + exit(EXIT_FAILURE); + } +} + + +int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { + struct lua_longjmp lj; + lj.status = 0; + lj.previous = L->errorJmp; /* chain new error handler */ + L->errorJmp = &lj; + LUAI_TRY(L, &lj, + (*f)(L, ud); + ); + L->errorJmp = lj.previous; /* restore old error handler */ + return lj.status; +} + +/* }====================================================== */ + + +static void correctstack (lua_State *L, TValue *oldstack) { + CallInfo *ci; + GCObject *up; + L->top = (L->top - oldstack) + L->stack; + for (up = L->openupval; up != NULL; up = up->gch.next) + gco2uv(up)->v = (gco2uv(up)->v - oldstack) + L->stack; + for (ci = L->base_ci; ci <= L->ci; ci++) { + ci->top = (ci->top - oldstack) + L->stack; + ci->base = (ci->base - oldstack) + L->stack; + ci->func = (ci->func - oldstack) + L->stack; + } + L->base = (L->base - oldstack) + L->stack; +} + + +void luaD_reallocstack (lua_State *L, int newsize) { + TValue *oldstack = L->stack; + int realsize = newsize + 1 + EXTRA_STACK; + lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK - 1); + luaM_reallocvector(L, L->stack, L->stacksize, realsize, TValue); + L->stacksize = realsize; + L->stack_last = L->stack+newsize; + correctstack(L, oldstack); +} + + +void luaD_reallocCI (lua_State *L, int newsize) { + CallInfo *oldci = L->base_ci; + luaM_reallocvector(L, L->base_ci, L->size_ci, newsize, CallInfo); + L->size_ci = newsize; + L->ci = (L->ci - oldci) + L->base_ci; + L->end_ci = L->base_ci + L->size_ci - 1; +} + + +void luaD_growstack (lua_State *L, int n) { + if (n <= L->stacksize) /* double size is enough? */ + luaD_reallocstack(L, 2*L->stacksize); + else + luaD_reallocstack(L, L->stacksize + n); +} + + +static CallInfo *growCI (lua_State *L) { + if (L->size_ci > LUAI_MAXCALLS) /* overflow while handling overflow? 
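/*
** A minimal standalone sketch (not part of the upstream source above) of the
** invariant behind luaD_reallocstack()/correctstack(): growing the value stack
** may move it, so the core saves offsets (savestack/restorestack) and rebases
** pointers after the reallocation instead of keeping raw addresses.  All names
** below are invented for illustration.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

int main(void) {
  int size = 4;
  double *stack = malloc(size * sizeof *stack);
  double *top;
  ptrdiff_t saved;
  if (stack == NULL) return 1;

  top = stack + 2;                    /* a pointer into the current block */
  saved = top - stack;                /* "savestack": remember the offset, not the address */
  stack = realloc(stack, 2 * size * sizeof *stack);   /* the block may move */
  if (stack == NULL) return 1;
  top = stack + saved;                /* "restorestack": rebuild the pointer */

  printf("top is slot %ld of the new block\n", (long)(top - stack));   /* 2 */
  free(stack);
  return 0;
}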
*/ + luaD_throw(L, LUA_ERRERR); + else { + luaD_reallocCI(L, 2*L->size_ci); + if (L->size_ci > LUAI_MAXCALLS) + luaG_runerror(L, "stack overflow"); + } + return ++L->ci; +} + + +void luaD_callhook (lua_State *L, int event, int line) { + lua_Hook hook = L->hook; + if (hook && L->allowhook) { + ptrdiff_t top = savestack(L, L->top); + ptrdiff_t ci_top = savestack(L, L->ci->top); + lua_Debug ar; + ar.event = event; + ar.currentline = line; + if (event == LUA_HOOKTAILRET) + ar.i_ci = 0; /* tail call; no debug information about it */ + else + ar.i_ci = cast_int(L->ci - L->base_ci); + luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ + L->ci->top = L->top + LUA_MINSTACK; + lua_assert(L->ci->top <= L->stack_last); + L->allowhook = 0; /* cannot call hooks inside a hook */ + lua_unlock(L); + (*hook)(L, &ar); + lua_lock(L); + lua_assert(!L->allowhook); + L->allowhook = 1; + L->ci->top = restorestack(L, ci_top); + L->top = restorestack(L, top); + } +} + + +static StkId adjust_varargs (lua_State *L, Proto *p, int actual) { + int i; + int nfixargs = p->numparams; + Table *htab = NULL; + StkId base, fixed; + for (; actual < nfixargs; ++actual) + setnilvalue(L->top++); +#if defined(LUA_COMPAT_VARARG) + if (p->is_vararg & VARARG_NEEDSARG) { /* compat. with old-style vararg? */ + int nvar = actual - nfixargs; /* number of extra arguments */ + lua_assert(p->is_vararg & VARARG_HASARG); + luaC_checkGC(L); + luaD_checkstack(L, p->maxstacksize); + htab = luaH_new(L, nvar, 1); /* create `arg' table */ + for (i=0; itop - nvar + i); + /* store counter in field `n' */ + setnvalue(luaH_setstr(L, htab, luaS_newliteral(L, "n")), cast_num(nvar)); + } +#endif + /* move fixed parameters to final position */ + fixed = L->top - actual; /* first fixed argument */ + base = L->top; /* final position of first argument */ + for (i=0; itop++, fixed+i); + setnilvalue(fixed+i); + } + /* add `arg' parameter */ + if (htab) { + sethvalue(L, L->top++, htab); + lua_assert(iswhite(obj2gco(htab))); + } + return base; +} + + +static StkId tryfuncTM (lua_State *L, StkId func) { + const TValue *tm = luaT_gettmbyobj(L, func, TM_CALL); + StkId p; + ptrdiff_t funcr = savestack(L, func); + if (!ttisfunction(tm)) + luaG_typeerror(L, func, "call"); + /* Open a hole inside the stack at `func' */ + for (p = L->top; p > func; p--) setobjs2s(L, p, p-1); + incr_top(L); + func = restorestack(L, funcr); /* previous call may change stack */ + setobj2s(L, func, tm); /* tag method is the new function to be called */ + return func; +} + + + +#define inc_ci(L) \ + ((L->ci == L->end_ci) ? growCI(L) : \ + (condhardstacktests(luaD_reallocCI(L, L->size_ci)), ++L->ci)) + + +int luaD_precall (lua_State *L, StkId func, int nresults) { + LClosure *cl; + ptrdiff_t funcr; + if (!ttisfunction(func)) /* `func' is not a function? */ + func = tryfuncTM(L, func); /* check the `function' tag method */ + funcr = savestack(L, func); + cl = &clvalue(func)->l; + L->ci->savedpc = L->savedpc; + if (!cl->isC) { /* Lua function? prepare its call */ + CallInfo *ci; + StkId st, base; + Proto *p = cl->p; + luaD_checkstack(L, p->maxstacksize + p->numparams); + func = restorestack(L, funcr); + if (!p->is_vararg) { /* no varargs? 
*/ + base = func + 1; + if (L->top > base + p->numparams) + L->top = base + p->numparams; + } + else { /* vararg function */ + int nargs = cast_int(L->top - func) - 1; + base = adjust_varargs(L, p, nargs); + func = restorestack(L, funcr); /* previous call may change the stack */ + } + ci = inc_ci(L); /* now `enter' new function */ + ci->func = func; + L->base = ci->base = base; + ci->top = L->base + p->maxstacksize; + lua_assert(ci->top <= L->stack_last); + L->savedpc = p->code; /* starting point */ + ci->tailcalls = 0; + ci->nresults = nresults; + for (st = L->top; st < ci->top; st++) + setnilvalue(st); + L->top = ci->top; + if (L->hookmask & LUA_MASKCALL) { + L->savedpc++; /* hooks assume 'pc' is already incremented */ + luaD_callhook(L, LUA_HOOKCALL, -1); + L->savedpc--; /* correct 'pc' */ + } + return PCRLUA; + } + else { /* if is a C function, call it */ + CallInfo *ci; + int n; + luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ + ci = inc_ci(L); /* now `enter' new function */ + ci->func = restorestack(L, funcr); + L->base = ci->base = ci->func + 1; + ci->top = L->top + LUA_MINSTACK; + lua_assert(ci->top <= L->stack_last); + ci->nresults = nresults; + if (L->hookmask & LUA_MASKCALL) + luaD_callhook(L, LUA_HOOKCALL, -1); + lua_unlock(L); + n = (*curr_func(L)->c.f)(L); /* do the actual call */ + lua_lock(L); + if (n < 0) /* yielding? */ + return PCRYIELD; + else { + luaD_poscall(L, L->top - n); + return PCRC; + } + } +} + + +static StkId callrethooks (lua_State *L, StkId firstResult) { + ptrdiff_t fr = savestack(L, firstResult); /* next call may change stack */ + luaD_callhook(L, LUA_HOOKRET, -1); + if (f_isLua(L->ci)) { /* Lua function? */ + while ((L->hookmask & LUA_MASKRET) && L->ci->tailcalls--) /* tail calls */ + luaD_callhook(L, LUA_HOOKTAILRET, -1); + } + return restorestack(L, fr); +} + + +int luaD_poscall (lua_State *L, StkId firstResult) { + StkId res; + int wanted, i; + CallInfo *ci; + if (L->hookmask & LUA_MASKRET) + firstResult = callrethooks(L, firstResult); + ci = L->ci--; + res = ci->func; /* res == final position of 1st result */ + wanted = ci->nresults; + L->base = (ci - 1)->base; /* restore base */ + L->savedpc = (ci - 1)->savedpc; /* restore savedpc */ + /* move results to correct place */ + for (i = wanted; i != 0 && firstResult < L->top; i--) + setobjs2s(L, res++, firstResult++); + while (i-- > 0) + setnilvalue(res++); + L->top = res; + return (wanted - LUA_MULTRET); /* 0 iff wanted == LUA_MULTRET */ +} + + +/* +** Call a function (C or Lua). The function to be called is at *func. +** The arguments are on the stack, right after the function. +** When returns, all the results are on the stack, starting at the original +** function position. +*/ +void luaD_call (lua_State *L, StkId func, int nResults) { + if (++L->nCcalls >= LUAI_MAXCCALLS) { + if (L->nCcalls == LUAI_MAXCCALLS) + luaG_runerror(L, "C stack overflow"); + else if (L->nCcalls >= (LUAI_MAXCCALLS + (LUAI_MAXCCALLS>>3))) + luaD_throw(L, LUA_ERRERR); /* error while handing stack error */ + } + if (luaD_precall(L, func, nResults) == PCRLUA) /* is a Lua function? */ + luaV_execute(L, 1); /* call it */ + L->nCcalls--; + luaC_checkGC(L); +} + + +static void resume (lua_State *L, void *ud) { + StkId firstArg = cast(StkId, ud); + CallInfo *ci = L->ci; + if (L->status == 0) { /* start coroutine? 
*/ + lua_assert(ci == L->base_ci && firstArg > L->base); + if (luaD_precall(L, firstArg - 1, LUA_MULTRET) != PCRLUA) + return; + } + else { /* resuming from previous yield */ + lua_assert(L->status == LUA_YIELD); + L->status = 0; + if (!f_isLua(ci)) { /* `common' yield? */ + /* finish interrupted execution of `OP_CALL' */ + lua_assert(GET_OPCODE(*((ci-1)->savedpc - 1)) == OP_CALL || + GET_OPCODE(*((ci-1)->savedpc - 1)) == OP_TAILCALL); + if (luaD_poscall(L, firstArg)) /* complete it... */ + L->top = L->ci->top; /* and correct top if not multiple results */ + } + else /* yielded inside a hook: just continue its execution */ + L->base = L->ci->base; + } + luaV_execute(L, cast_int(L->ci - L->base_ci)); +} + + +static int resume_error (lua_State *L, const char *msg) { + L->top = L->ci->base; + setsvalue2s(L, L->top, luaS_new(L, msg)); + incr_top(L); + lua_unlock(L); + return LUA_ERRRUN; +} + + +LUA_API int lua_resume (lua_State *L, int nargs) { + int status; + lua_lock(L); + if (L->status != LUA_YIELD && (L->status != 0 || L->ci != L->base_ci)) + return resume_error(L, "cannot resume non-suspended coroutine"); + if (L->nCcalls >= LUAI_MAXCCALLS) + return resume_error(L, "C stack overflow"); + luai_userstateresume(L, nargs); + lua_assert(L->errfunc == 0); + L->baseCcalls = ++L->nCcalls; + status = luaD_rawrunprotected(L, resume, L->top - nargs); + if (status != 0) { /* error? */ + L->status = cast_byte(status); /* mark thread as `dead' */ + luaD_seterrorobj(L, status, L->top); + L->ci->top = L->top; + } + else { + lua_assert(L->nCcalls == L->baseCcalls); + status = L->status; + } + --L->nCcalls; + lua_unlock(L); + return status; +} + + +LUA_API int lua_yield (lua_State *L, int nresults) { + luai_userstateyield(L, nresults); + lua_lock(L); + if (L->nCcalls > L->baseCcalls) + luaG_runerror(L, "attempt to yield across metamethod/C-call boundary"); + L->base = L->top - nresults; /* protect stack slots below */ + L->status = LUA_YIELD; + lua_unlock(L); + return -1; +} + + +int luaD_pcall (lua_State *L, Pfunc func, void *u, + ptrdiff_t old_top, ptrdiff_t ef) { + int status; + unsigned short oldnCcalls = L->nCcalls; + ptrdiff_t old_ci = saveci(L, L->ci); + lu_byte old_allowhooks = L->allowhook; + ptrdiff_t old_errfunc = L->errfunc; + L->errfunc = ef; + status = luaD_rawrunprotected(L, func, u); + if (status != 0) { /* an error occurred? */ + StkId oldtop = restorestack(L, old_top); + luaF_close(L, oldtop); /* close eventual pending closures */ + luaD_seterrorobj(L, status, oldtop); + L->nCcalls = oldnCcalls; + L->ci = restoreci(L, old_ci); + L->base = L->ci->base; + L->savedpc = L->ci->savedpc; + L->allowhook = old_allowhooks; + restore_stack_limit(L); + } + L->errfunc = old_errfunc; + return status; +} + + + +/* +** Execute a protected parser. 
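/*
** A minimal host-side sketch (not part of the upstream source above):
** luaD_pcall() runs a function under luaD_rawrunprotected() and routes errors
** through L->errfunc, which is what the errfunc argument of lua_pcall()
** selects.  Typical Lua 5.1 usage with debug.traceback as the handler; the
** chunk string is made up.
*/
#include <stdio.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>

int main(void) {
  int errfunc;
  lua_State *L = luaL_newstate();
  luaL_openlibs(L);

  lua_getglobal(L, "debug");              /* use debug.traceback as the error handler */
  lua_getfield(L, -1, "traceback");
  lua_remove(L, -2);
  errfunc = lua_gettop(L);                /* absolute stack index of the handler */

  luaL_loadstring(L, "error('boom')");    /* a chunk that raises an error */
  if (lua_pcall(L, 0, 0, errfunc) != 0) {
    printf("%s\n", lua_tostring(L, -1));  /* message decorated with a traceback */
    lua_pop(L, 1);
  }
  lua_close(L);
  return 0;
}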
+*/ +struct SParser { /* data to `f_parser' */ + ZIO *z; + Mbuffer buff; /* buffer to be used by the scanner */ + const char *name; +}; + +static void f_parser (lua_State *L, void *ud) { + int i; + Proto *tf; + Closure *cl; + struct SParser *p = cast(struct SParser *, ud); + luaZ_lookahead(p->z); + luaC_checkGC(L); + tf = (luaY_parser)(L, p->z, + &p->buff, p->name); + cl = luaF_newLclosure(L, tf->nups, hvalue(gt(L))); + cl->l.p = tf; + for (i = 0; i < tf->nups; i++) /* initialize eventual upvalues */ + cl->l.upvals[i] = luaF_newupval(L); + setclvalue(L, L->top, cl); + incr_top(L); +} + + +int luaD_protectedparser (lua_State *L, ZIO *z, const char *name) { + struct SParser p; + int status; + p.z = z; p.name = name; + luaZ_initbuffer(L, &p.buff); + status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); + luaZ_freebuffer(L, &p.buff); + return status; +} + + diff --git a/deps/lua/src/ldo.h b/deps/lua/src/ldo.h new file mode 100644 index 0000000..98fddac --- /dev/null +++ b/deps/lua/src/ldo.h @@ -0,0 +1,57 @@ +/* +** $Id: ldo.h,v 2.7.1.1 2007/12/27 13:02:25 roberto Exp $ +** Stack and Call structure of Lua +** See Copyright Notice in lua.h +*/ + +#ifndef ldo_h +#define ldo_h + + +#include "lobject.h" +#include "lstate.h" +#include "lzio.h" + + +#define luaD_checkstack(L,n) \ + if ((char *)L->stack_last - (char *)L->top <= (n)*(int)sizeof(TValue)) \ + luaD_growstack(L, n); \ + else condhardstacktests(luaD_reallocstack(L, L->stacksize - EXTRA_STACK - 1)); + + +#define incr_top(L) {luaD_checkstack(L,1); L->top++;} + +#define savestack(L,p) ((char *)(p) - (char *)L->stack) +#define restorestack(L,n) ((TValue *)((char *)L->stack + (n))) + +#define saveci(L,p) ((char *)(p) - (char *)L->base_ci) +#define restoreci(L,n) ((CallInfo *)((char *)L->base_ci + (n))) + + +/* results from luaD_precall */ +#define PCRLUA 0 /* initiated a call to a Lua function */ +#define PCRC 1 /* did a call to a C function */ +#define PCRYIELD 2 /* C funtion yielded */ + + +/* type of protected functions, to be ran by `runprotected' */ +typedef void (*Pfunc) (lua_State *L, void *ud); + +LUAI_FUNC int luaD_protectedparser (lua_State *L, ZIO *z, const char *name); +LUAI_FUNC void luaD_callhook (lua_State *L, int event, int line); +LUAI_FUNC int luaD_precall (lua_State *L, StkId func, int nresults); +LUAI_FUNC void luaD_call (lua_State *L, StkId func, int nResults); +LUAI_FUNC int luaD_pcall (lua_State *L, Pfunc func, void *u, + ptrdiff_t oldtop, ptrdiff_t ef); +LUAI_FUNC int luaD_poscall (lua_State *L, StkId firstResult); +LUAI_FUNC void luaD_reallocCI (lua_State *L, int newsize); +LUAI_FUNC void luaD_reallocstack (lua_State *L, int newsize); +LUAI_FUNC void luaD_growstack (lua_State *L, int n); + +LUAI_FUNC void luaD_throw (lua_State *L, int errcode); +LUAI_FUNC int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud); + +LUAI_FUNC void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop); + +#endif + diff --git a/deps/lua/src/ldump.c b/deps/lua/src/ldump.c new file mode 100644 index 0000000..c9d3d48 --- /dev/null +++ b/deps/lua/src/ldump.c @@ -0,0 +1,164 @@ +/* +** $Id: ldump.c,v 2.8.1.1 2007/12/27 13:02:25 roberto Exp $ +** save precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#include + +#define ldump_c +#define LUA_CORE + +#include "lua.h" + +#include "lobject.h" +#include "lstate.h" +#include "lundump.h" + +typedef struct { + lua_State* L; + lua_Writer writer; + void* data; + int strip; + int status; +} DumpState; + +#define DumpMem(b,n,size,D) DumpBlock(b,(n)*(size),D) +#define 
DumpVar(x,D) DumpMem(&x,1,sizeof(x),D) + +static void DumpBlock(const void* b, size_t size, DumpState* D) +{ + if (D->status==0) + { + lua_unlock(D->L); + D->status=(*D->writer)(D->L,b,size,D->data); + lua_lock(D->L); + } +} + +static void DumpChar(int y, DumpState* D) +{ + char x=(char)y; + DumpVar(x,D); +} + +static void DumpInt(int x, DumpState* D) +{ + DumpVar(x,D); +} + +static void DumpNumber(lua_Number x, DumpState* D) +{ + DumpVar(x,D); +} + +static void DumpVector(const void* b, int n, size_t size, DumpState* D) +{ + DumpInt(n,D); + DumpMem(b,n,size,D); +} + +static void DumpString(const TString* s, DumpState* D) +{ + if (s==NULL || getstr(s)==NULL) + { + size_t size=0; + DumpVar(size,D); + } + else + { + size_t size=s->tsv.len+1; /* include trailing '\0' */ + DumpVar(size,D); + DumpBlock(getstr(s),size,D); + } +} + +#define DumpCode(f,D) DumpVector(f->code,f->sizecode,sizeof(Instruction),D) + +static void DumpFunction(const Proto* f, const TString* p, DumpState* D); + +static void DumpConstants(const Proto* f, DumpState* D) +{ + int i,n=f->sizek; + DumpInt(n,D); + for (i=0; ik[i]; + DumpChar(ttype(o),D); + switch (ttype(o)) + { + case LUA_TNIL: + break; + case LUA_TBOOLEAN: + DumpChar(bvalue(o),D); + break; + case LUA_TNUMBER: + DumpNumber(nvalue(o),D); + break; + case LUA_TSTRING: + DumpString(rawtsvalue(o),D); + break; + default: + lua_assert(0); /* cannot happen */ + break; + } + } + n=f->sizep; + DumpInt(n,D); + for (i=0; ip[i],f->source,D); +} + +static void DumpDebug(const Proto* f, DumpState* D) +{ + int i,n; + n= (D->strip) ? 0 : f->sizelineinfo; + DumpVector(f->lineinfo,n,sizeof(int),D); + n= (D->strip) ? 0 : f->sizelocvars; + DumpInt(n,D); + for (i=0; ilocvars[i].varname,D); + DumpInt(f->locvars[i].startpc,D); + DumpInt(f->locvars[i].endpc,D); + } + n= (D->strip) ? 0 : f->sizeupvalues; + DumpInt(n,D); + for (i=0; iupvalues[i],D); +} + +static void DumpFunction(const Proto* f, const TString* p, DumpState* D) +{ + DumpString((f->source==p || D->strip) ? 
NULL : f->source,D); + DumpInt(f->linedefined,D); + DumpInt(f->lastlinedefined,D); + DumpChar(f->nups,D); + DumpChar(f->numparams,D); + DumpChar(f->is_vararg,D); + DumpChar(f->maxstacksize,D); + DumpCode(f,D); + DumpConstants(f,D); + DumpDebug(f,D); +} + +static void DumpHeader(DumpState* D) +{ + char h[LUAC_HEADERSIZE]; + luaU_header(h); + DumpBlock(h,LUAC_HEADERSIZE,D); +} + +/* +** dump Lua function as precompiled chunk +*/ +int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip) +{ + DumpState D; + D.L=L; + D.writer=w; + D.data=data; + D.strip=strip; + D.status=0; + DumpHeader(&D); + DumpFunction(f,NULL,&D); + return D.status; +} diff --git a/deps/lua/src/lfunc.c b/deps/lua/src/lfunc.c new file mode 100644 index 0000000..813e88f --- /dev/null +++ b/deps/lua/src/lfunc.c @@ -0,0 +1,174 @@ +/* +** $Id: lfunc.c,v 2.12.1.2 2007/12/28 14:58:43 roberto Exp $ +** Auxiliary functions to manipulate prototypes and closures +** See Copyright Notice in lua.h +*/ + + +#include + +#define lfunc_c +#define LUA_CORE + +#include "lua.h" + +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" + + + +Closure *luaF_newCclosure (lua_State *L, int nelems, Table *e) { + Closure *c = cast(Closure *, luaM_malloc(L, sizeCclosure(nelems))); + luaC_link(L, obj2gco(c), LUA_TFUNCTION); + c->c.isC = 1; + c->c.env = e; + c->c.nupvalues = cast_byte(nelems); + return c; +} + + +Closure *luaF_newLclosure (lua_State *L, int nelems, Table *e) { + Closure *c = cast(Closure *, luaM_malloc(L, sizeLclosure(nelems))); + luaC_link(L, obj2gco(c), LUA_TFUNCTION); + c->l.isC = 0; + c->l.env = e; + c->l.nupvalues = cast_byte(nelems); + while (nelems--) c->l.upvals[nelems] = NULL; + return c; +} + + +UpVal *luaF_newupval (lua_State *L) { + UpVal *uv = luaM_new(L, UpVal); + luaC_link(L, obj2gco(uv), LUA_TUPVAL); + uv->v = &uv->u.value; + setnilvalue(uv->v); + return uv; +} + + +UpVal *luaF_findupval (lua_State *L, StkId level) { + global_State *g = G(L); + GCObject **pp = &L->openupval; + UpVal *p; + UpVal *uv; + while (*pp != NULL && (p = ngcotouv(*pp))->v >= level) { + lua_assert(p->v != &p->u.value); + if (p->v == level) { /* found a corresponding upvalue? */ + if (isdead(g, obj2gco(p))) /* is it dead? */ + changewhite(obj2gco(p)); /* ressurect it */ + return p; + } + pp = &p->next; + } + uv = luaM_new(L, UpVal); /* not found: create a new one */ + uv->tt = LUA_TUPVAL; + uv->marked = luaC_white(g); + uv->v = level; /* current value lives in the stack */ + uv->next = *pp; /* chain it in the proper position */ + *pp = obj2gco(uv); + uv->u.l.prev = &g->uvhead; /* double link it in `uvhead' list */ + uv->u.l.next = g->uvhead.u.l.next; + uv->u.l.next->u.l.prev = uv; + g->uvhead.u.l.next = uv; + lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv); + return uv; +} + + +static void unlinkupval (UpVal *uv) { + lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv); + uv->u.l.next->u.l.prev = uv->u.l.prev; /* remove from `uvhead' list */ + uv->u.l.prev->u.l.next = uv->u.l.next; +} + + +void luaF_freeupval (lua_State *L, UpVal *uv) { + if (uv->v != &uv->u.value) /* is it open? 
*/ + unlinkupval(uv); /* remove from open list */ + luaM_free(L, uv); /* free upvalue */ +} + + +void luaF_close (lua_State *L, StkId level) { + UpVal *uv; + global_State *g = G(L); + while (L->openupval != NULL && (uv = ngcotouv(L->openupval))->v >= level) { + GCObject *o = obj2gco(uv); + lua_assert(!isblack(o) && uv->v != &uv->u.value); + L->openupval = uv->next; /* remove from `open' list */ + if (isdead(g, o)) + luaF_freeupval(L, uv); /* free upvalue */ + else { + unlinkupval(uv); + setobj(L, &uv->u.value, uv->v); + uv->v = &uv->u.value; /* now current value lives here */ + luaC_linkupval(L, uv); /* link upvalue into `gcroot' list */ + } + } +} + + +Proto *luaF_newproto (lua_State *L) { + Proto *f = luaM_new(L, Proto); + luaC_link(L, obj2gco(f), LUA_TPROTO); + f->k = NULL; + f->sizek = 0; + f->p = NULL; + f->sizep = 0; + f->code = NULL; + f->sizecode = 0; + f->sizelineinfo = 0; + f->sizeupvalues = 0; + f->nups = 0; + f->upvalues = NULL; + f->numparams = 0; + f->is_vararg = 0; + f->maxstacksize = 0; + f->lineinfo = NULL; + f->sizelocvars = 0; + f->locvars = NULL; + f->linedefined = 0; + f->lastlinedefined = 0; + f->source = NULL; + return f; +} + + +void luaF_freeproto (lua_State *L, Proto *f) { + luaM_freearray(L, f->code, f->sizecode, Instruction); + luaM_freearray(L, f->p, f->sizep, Proto *); + luaM_freearray(L, f->k, f->sizek, TValue); + luaM_freearray(L, f->lineinfo, f->sizelineinfo, int); + luaM_freearray(L, f->locvars, f->sizelocvars, struct LocVar); + luaM_freearray(L, f->upvalues, f->sizeupvalues, TString *); + luaM_free(L, f); +} + + +void luaF_freeclosure (lua_State *L, Closure *c) { + int size = (c->c.isC) ? sizeCclosure(c->c.nupvalues) : + sizeLclosure(c->l.nupvalues); + luaM_freemem(L, c, size); +} + + +/* +** Look for n-th local variable at line `line' in function `func'. +** Returns NULL if not found. +*/ +const char *luaF_getlocalname (const Proto *f, int local_number, int pc) { + int i; + for (i = 0; isizelocvars && f->locvars[i].startpc <= pc; i++) { + if (pc < f->locvars[i].endpc) { /* is variable active? 
*/ + local_number--; + if (local_number == 0) + return getstr(f->locvars[i].varname); + } + } + return NULL; /* not found */ +} + diff --git a/deps/lua/src/lfunc.h b/deps/lua/src/lfunc.h new file mode 100644 index 0000000..a68cf51 --- /dev/null +++ b/deps/lua/src/lfunc.h @@ -0,0 +1,34 @@ +/* +** $Id: lfunc.h,v 2.4.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions to manipulate prototypes and closures +** See Copyright Notice in lua.h +*/ + +#ifndef lfunc_h +#define lfunc_h + + +#include "lobject.h" + + +#define sizeCclosure(n) (cast(int, sizeof(CClosure)) + \ + cast(int, sizeof(TValue)*((n)-1))) + +#define sizeLclosure(n) (cast(int, sizeof(LClosure)) + \ + cast(int, sizeof(TValue *)*((n)-1))) + + +LUAI_FUNC Proto *luaF_newproto (lua_State *L); +LUAI_FUNC Closure *luaF_newCclosure (lua_State *L, int nelems, Table *e); +LUAI_FUNC Closure *luaF_newLclosure (lua_State *L, int nelems, Table *e); +LUAI_FUNC UpVal *luaF_newupval (lua_State *L); +LUAI_FUNC UpVal *luaF_findupval (lua_State *L, StkId level); +LUAI_FUNC void luaF_close (lua_State *L, StkId level); +LUAI_FUNC void luaF_freeproto (lua_State *L, Proto *f); +LUAI_FUNC void luaF_freeclosure (lua_State *L, Closure *c); +LUAI_FUNC void luaF_freeupval (lua_State *L, UpVal *uv); +LUAI_FUNC const char *luaF_getlocalname (const Proto *func, int local_number, + int pc); + + +#endif diff --git a/deps/lua/src/lgc.c b/deps/lua/src/lgc.c new file mode 100644 index 0000000..e909c79 --- /dev/null +++ b/deps/lua/src/lgc.c @@ -0,0 +1,710 @@ +/* +** $Id: lgc.c,v 2.38.1.2 2011/03/18 18:05:38 roberto Exp $ +** Garbage Collector +** See Copyright Notice in lua.h +*/ + +#include + +#define lgc_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + +#define GCSTEPSIZE 1024u +#define GCSWEEPMAX 40 +#define GCSWEEPCOST 10 +#define GCFINALIZECOST 100 + + +#define maskmarks cast_byte(~(bitmask(BLACKBIT)|WHITEBITS)) + +#define makewhite(g,x) \ + ((x)->gch.marked = cast_byte(((x)->gch.marked & maskmarks) | luaC_white(g))) + +#define white2gray(x) reset2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT) +#define black2gray(x) resetbit((x)->gch.marked, BLACKBIT) + +#define stringmark(s) reset2bits((s)->tsv.marked, WHITE0BIT, WHITE1BIT) + + +#define isfinalized(u) testbit((u)->marked, FINALIZEDBIT) +#define markfinalized(u) l_setbit((u)->marked, FINALIZEDBIT) + + +#define KEYWEAK bitmask(KEYWEAKBIT) +#define VALUEWEAK bitmask(VALUEWEAKBIT) + + + +#define markvalue(g,o) { checkconsistency(o); \ + if (iscollectable(o) && iswhite(gcvalue(o))) reallymarkobject(g,gcvalue(o)); } + +#define markobject(g,t) { if (iswhite(obj2gco(t))) \ + reallymarkobject(g, obj2gco(t)); } + + +#define setthreshold(g) (g->GCthreshold = (g->estimate/100) * g->gcpause) + + +static void removeentry (Node *n) { + lua_assert(ttisnil(gval(n))); + if (iscollectable(gkey(n))) + setttype(gkey(n), LUA_TDEADKEY); /* dead key; remove it */ +} + + +static void reallymarkobject (global_State *g, GCObject *o) { + lua_assert(iswhite(o) && !isdead(g, o)); + white2gray(o); + switch (o->gch.tt) { + case LUA_TSTRING: { + return; + } + case LUA_TUSERDATA: { + Table *mt = gco2u(o)->metatable; + gray2black(o); /* udata are never gray */ + if (mt) markobject(g, mt); + markobject(g, gco2u(o)->env); + return; + } + case LUA_TUPVAL: { + UpVal *uv = gco2uv(o); + markvalue(g, uv->v); + if (uv->v == &uv->u.value) /* closed? 
*/ + gray2black(o); /* open upvalues are never black */ + return; + } + case LUA_TFUNCTION: { + gco2cl(o)->c.gclist = g->gray; + g->gray = o; + break; + } + case LUA_TTABLE: { + gco2h(o)->gclist = g->gray; + g->gray = o; + break; + } + case LUA_TTHREAD: { + gco2th(o)->gclist = g->gray; + g->gray = o; + break; + } + case LUA_TPROTO: { + gco2p(o)->gclist = g->gray; + g->gray = o; + break; + } + default: lua_assert(0); + } +} + + +static void marktmu (global_State *g) { + GCObject *u = g->tmudata; + if (u) { + do { + u = u->gch.next; + makewhite(g, u); /* may be marked, if left from previous GC */ + reallymarkobject(g, u); + } while (u != g->tmudata); + } +} + + +/* move `dead' udata that need finalization to list `tmudata' */ +size_t luaC_separateudata (lua_State *L, int all) { + global_State *g = G(L); + size_t deadmem = 0; + GCObject **p = &g->mainthread->next; + GCObject *curr; + while ((curr = *p) != NULL) { + if (!(iswhite(curr) || all) || isfinalized(gco2u(curr))) + p = &curr->gch.next; /* don't bother with them */ + else if (fasttm(L, gco2u(curr)->metatable, TM_GC) == NULL) { + markfinalized(gco2u(curr)); /* don't need finalization */ + p = &curr->gch.next; + } + else { /* must call its gc method */ + deadmem += sizeudata(gco2u(curr)); + markfinalized(gco2u(curr)); + *p = curr->gch.next; + /* link `curr' at the end of `tmudata' list */ + if (g->tmudata == NULL) /* list is empty? */ + g->tmudata = curr->gch.next = curr; /* creates a circular list */ + else { + curr->gch.next = g->tmudata->gch.next; + g->tmudata->gch.next = curr; + g->tmudata = curr; + } + } + } + return deadmem; +} + + +static int traversetable (global_State *g, Table *h) { + int i; + int weakkey = 0; + int weakvalue = 0; + const TValue *mode; + if (h->metatable) + markobject(g, h->metatable); + mode = gfasttm(g, h->metatable, TM_MODE); + if (mode && ttisstring(mode)) { /* is there a weak mode? */ + weakkey = (strchr(svalue(mode), 'k') != NULL); + weakvalue = (strchr(svalue(mode), 'v') != NULL); + if (weakkey || weakvalue) { /* is really weak? */ + h->marked &= ~(KEYWEAK | VALUEWEAK); /* clear bits */ + h->marked |= cast_byte((weakkey << KEYWEAKBIT) | + (weakvalue << VALUEWEAKBIT)); + h->gclist = g->weak; /* must be cleared after GC, ... */ + g->weak = obj2gco(h); /* ... 
so put in the appropriate list */ + } + } + if (weakkey && weakvalue) return 1; + if (!weakvalue) { + i = h->sizearray; + while (i--) + markvalue(g, &h->array[i]); + } + i = sizenode(h); + while (i--) { + Node *n = gnode(h, i); + lua_assert(ttype(gkey(n)) != LUA_TDEADKEY || ttisnil(gval(n))); + if (ttisnil(gval(n))) + removeentry(n); /* remove empty entries */ + else { + lua_assert(!ttisnil(gkey(n))); + if (!weakkey) markvalue(g, gkey(n)); + if (!weakvalue) markvalue(g, gval(n)); + } + } + return weakkey || weakvalue; +} + + +/* +** All marks are conditional because a GC may happen while the +** prototype is still being created +*/ +static void traverseproto (global_State *g, Proto *f) { + int i; + if (f->source) stringmark(f->source); + for (i=0; isizek; i++) /* mark literals */ + markvalue(g, &f->k[i]); + for (i=0; isizeupvalues; i++) { /* mark upvalue names */ + if (f->upvalues[i]) + stringmark(f->upvalues[i]); + } + for (i=0; isizep; i++) { /* mark nested protos */ + if (f->p[i]) + markobject(g, f->p[i]); + } + for (i=0; isizelocvars; i++) { /* mark local-variable names */ + if (f->locvars[i].varname) + stringmark(f->locvars[i].varname); + } +} + + + +static void traverseclosure (global_State *g, Closure *cl) { + markobject(g, cl->c.env); + if (cl->c.isC) { + int i; + for (i=0; ic.nupvalues; i++) /* mark its upvalues */ + markvalue(g, &cl->c.upvalue[i]); + } + else { + int i; + lua_assert(cl->l.nupvalues == cl->l.p->nups); + markobject(g, cl->l.p); + for (i=0; il.nupvalues; i++) /* mark its upvalues */ + markobject(g, cl->l.upvals[i]); + } +} + + +static void checkstacksizes (lua_State *L, StkId max) { + int ci_used = cast_int(L->ci - L->base_ci); /* number of `ci' in use */ + int s_used = cast_int(max - L->stack); /* part of stack in use */ + if (L->size_ci > LUAI_MAXCALLS) /* handling overflow? */ + return; /* do not touch the stacks */ + if (4*ci_used < L->size_ci && 2*BASIC_CI_SIZE < L->size_ci) + luaD_reallocCI(L, L->size_ci/2); /* still big enough... */ + condhardstacktests(luaD_reallocCI(L, ci_used + 1)); + if (4*s_used < L->stacksize && + 2*(BASIC_STACK_SIZE+EXTRA_STACK) < L->stacksize) + luaD_reallocstack(L, L->stacksize/2); /* still big enough... */ + condhardstacktests(luaD_reallocstack(L, s_used)); +} + + +static void traversestack (global_State *g, lua_State *l) { + StkId o, lim; + CallInfo *ci; + markvalue(g, gt(l)); + lim = l->top; + for (ci = l->base_ci; ci <= l->ci; ci++) { + lua_assert(ci->top <= l->stack_last); + if (lim < ci->top) lim = ci->top; + } + for (o = l->stack; o < l->top; o++) + markvalue(g, o); + for (; o <= lim; o++) + setnilvalue(o); + checkstacksizes(l, lim); +} + + +/* +** traverse one gray object, turning it to black. +** Returns `quantity' traversed. +*/ +static l_mem propagatemark (global_State *g) { + GCObject *o = g->gray; + lua_assert(isgray(o)); + gray2black(o); + switch (o->gch.tt) { + case LUA_TTABLE: { + Table *h = gco2h(o); + g->gray = h->gclist; + if (traversetable(g, h)) /* table is weak? */ + black2gray(o); /* keep it gray */ + return sizeof(Table) + sizeof(TValue) * h->sizearray + + sizeof(Node) * sizenode(h); + } + case LUA_TFUNCTION: { + Closure *cl = gco2cl(o); + g->gray = cl->c.gclist; + traverseclosure(g, cl); + return (cl->c.isC) ? 
sizeCclosure(cl->c.nupvalues) : + sizeLclosure(cl->l.nupvalues); + } + case LUA_TTHREAD: { + lua_State *th = gco2th(o); + g->gray = th->gclist; + th->gclist = g->grayagain; + g->grayagain = o; + black2gray(o); + traversestack(g, th); + return sizeof(lua_State) + sizeof(TValue) * th->stacksize + + sizeof(CallInfo) * th->size_ci; + } + case LUA_TPROTO: { + Proto *p = gco2p(o); + g->gray = p->gclist; + traverseproto(g, p); + return sizeof(Proto) + sizeof(Instruction) * p->sizecode + + sizeof(Proto *) * p->sizep + + sizeof(TValue) * p->sizek + + sizeof(int) * p->sizelineinfo + + sizeof(LocVar) * p->sizelocvars + + sizeof(TString *) * p->sizeupvalues; + } + default: lua_assert(0); return 0; + } +} + + +static size_t propagateall (global_State *g) { + size_t m = 0; + while (g->gray) m += propagatemark(g); + return m; +} + + +/* +** The next function tells whether a key or value can be cleared from +** a weak table. Non-collectable objects are never removed from weak +** tables. Strings behave as `values', so are never removed too. for +** other objects: if really collected, cannot keep them; for userdata +** being finalized, keep them in keys, but not in values +*/ +static int iscleared (const TValue *o, int iskey) { + if (!iscollectable(o)) return 0; + if (ttisstring(o)) { + stringmark(rawtsvalue(o)); /* strings are `values', so are never weak */ + return 0; + } + return iswhite(gcvalue(o)) || + (ttisuserdata(o) && (!iskey && isfinalized(uvalue(o)))); +} + + +/* +** clear collected entries from weaktables +*/ +static void cleartable (GCObject *l) { + while (l) { + Table *h = gco2h(l); + int i = h->sizearray; + lua_assert(testbit(h->marked, VALUEWEAKBIT) || + testbit(h->marked, KEYWEAKBIT)); + if (testbit(h->marked, VALUEWEAKBIT)) { + while (i--) { + TValue *o = &h->array[i]; + if (iscleared(o, 0)) /* value was collected? */ + setnilvalue(o); /* remove value */ + } + } + i = sizenode(h); + while (i--) { + Node *n = gnode(h, i); + if (!ttisnil(gval(n)) && /* non-empty entry? */ + (iscleared(key2tval(n), 1) || iscleared(gval(n), 0))) { + setnilvalue(gval(n)); /* remove value ... */ + removeentry(n); /* remove entry from table */ + } + } + l = h->gclist; + } +} + + +static void freeobj (lua_State *L, GCObject *o) { + switch (o->gch.tt) { + case LUA_TPROTO: luaF_freeproto(L, gco2p(o)); break; + case LUA_TFUNCTION: luaF_freeclosure(L, gco2cl(o)); break; + case LUA_TUPVAL: luaF_freeupval(L, gco2uv(o)); break; + case LUA_TTABLE: luaH_free(L, gco2h(o)); break; + case LUA_TTHREAD: { + lua_assert(gco2th(o) != L && gco2th(o) != G(L)->mainthread); + luaE_freethread(L, gco2th(o)); + break; + } + case LUA_TSTRING: { + G(L)->strt.nuse--; + luaM_freemem(L, o, sizestring(gco2ts(o))); + break; + } + case LUA_TUSERDATA: { + luaM_freemem(L, o, sizeudata(gco2u(o))); + break; + } + default: lua_assert(0); + } +} + + + +#define sweepwholelist(L,p) sweeplist(L,p,MAX_LUMEM) + + +static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count) { + GCObject *curr; + global_State *g = G(L); + int deadmask = otherwhite(g); + while ((curr = *p) != NULL && count-- > 0) { + if (curr->gch.tt == LUA_TTHREAD) /* sweep open upvalues of each thread */ + sweepwholelist(L, &gco2th(curr)->openupval); + if ((curr->gch.marked ^ WHITEBITS) & deadmask) { /* not dead? 
*/ + lua_assert(!isdead(g, curr) || testbit(curr->gch.marked, FIXEDBIT)); + makewhite(g, curr); /* make it white (for next cycle) */ + p = &curr->gch.next; + } + else { /* must erase `curr' */ + lua_assert(isdead(g, curr) || deadmask == bitmask(SFIXEDBIT)); + *p = curr->gch.next; + if (curr == g->rootgc) /* is the first element of the list? */ + g->rootgc = curr->gch.next; /* adjust first */ + freeobj(L, curr); + } + } + return p; +} + + +static void checkSizes (lua_State *L) { + global_State *g = G(L); + /* check size of string hash */ + if (g->strt.nuse < cast(lu_int32, g->strt.size/4) && + g->strt.size > MINSTRTABSIZE*2) + luaS_resize(L, g->strt.size/2); /* table is too big */ + /* check size of buffer */ + if (luaZ_sizebuffer(&g->buff) > LUA_MINBUFFER*2) { /* buffer too big? */ + size_t newsize = luaZ_sizebuffer(&g->buff) / 2; + luaZ_resizebuffer(L, &g->buff, newsize); + } +} + + +static void GCTM (lua_State *L) { + global_State *g = G(L); + GCObject *o = g->tmudata->gch.next; /* get first element */ + Udata *udata = rawgco2u(o); + const TValue *tm; + /* remove udata from `tmudata' */ + if (o == g->tmudata) /* last element? */ + g->tmudata = NULL; + else + g->tmudata->gch.next = udata->uv.next; + udata->uv.next = g->mainthread->next; /* return it to `root' list */ + g->mainthread->next = o; + makewhite(g, o); + tm = fasttm(L, udata->uv.metatable, TM_GC); + if (tm != NULL) { + lu_byte oldah = L->allowhook; + lu_mem oldt = g->GCthreshold; + L->allowhook = 0; /* stop debug hooks during GC tag method */ + g->GCthreshold = 2*g->totalbytes; /* avoid GC steps */ + setobj2s(L, L->top, tm); + setuvalue(L, L->top+1, udata); + L->top += 2; + luaD_call(L, L->top - 2, 0); + L->allowhook = oldah; /* restore hooks */ + g->GCthreshold = oldt; /* restore threshold */ + } +} + + +/* +** Call all GC tag methods +*/ +void luaC_callGCTM (lua_State *L) { + while (G(L)->tmudata) + GCTM(L); +} + + +void luaC_freeall (lua_State *L) { + global_State *g = G(L); + int i; + g->currentwhite = WHITEBITS | bitmask(SFIXEDBIT); /* mask to collect all elements */ + sweepwholelist(L, &g->rootgc); + for (i = 0; i < g->strt.size; i++) /* free all string lists */ + sweepwholelist(L, &g->strt.hash[i]); +} + + +static void markmt (global_State *g) { + int i; + for (i=0; imt[i]) markobject(g, g->mt[i]); +} + + +/* mark root set */ +static void markroot (lua_State *L) { + global_State *g = G(L); + g->gray = NULL; + g->grayagain = NULL; + g->weak = NULL; + markobject(g, g->mainthread); + /* make global table be traversed before main stack */ + markvalue(g, gt(g->mainthread)); + markvalue(g, registry(L)); + markmt(g); + g->gcstate = GCSpropagate; +} + + +static void remarkupvals (global_State *g) { + UpVal *uv; + for (uv = g->uvhead.u.l.next; uv != &g->uvhead; uv = uv->u.l.next) { + lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv); + if (isgray(obj2gco(uv))) + markvalue(g, uv->v); + } +} + + +static void atomic (lua_State *L) { + global_State *g = G(L); + size_t udsize; /* total size of userdata to be finalized */ + /* remark occasional upvalues of (maybe) dead threads */ + remarkupvals(g); + /* traverse objects cautch by write barrier and by 'remarkupvals' */ + propagateall(g); + /* remark weak tables */ + g->gray = g->weak; + g->weak = NULL; + lua_assert(!iswhite(obj2gco(g->mainthread))); + markobject(g, L); /* mark running thread */ + markmt(g); /* mark basic metatables (again) */ + propagateall(g); + /* remark gray again */ + g->gray = g->grayagain; + g->grayagain = NULL; + propagateall(g); + udsize = 
luaC_separateudata(L, 0); /* separate userdata to be finalized */ + marktmu(g); /* mark `preserved' userdata */ + udsize += propagateall(g); /* remark, to propagate `preserveness' */ + cleartable(g->weak); /* remove collected objects from weak tables */ + /* flip current white */ + g->currentwhite = cast_byte(otherwhite(g)); + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + g->gcstate = GCSsweepstring; + g->estimate = g->totalbytes - udsize; /* first estimate */ +} + + +static l_mem singlestep (lua_State *L) { + global_State *g = G(L); + /*lua_checkmemory(L);*/ + switch (g->gcstate) { + case GCSpause: { + markroot(L); /* start a new collection */ + return 0; + } + case GCSpropagate: { + if (g->gray) + return propagatemark(g); + else { /* no more `gray' objects */ + atomic(L); /* finish mark phase */ + return 0; + } + } + case GCSsweepstring: { + lu_mem old = g->totalbytes; + sweepwholelist(L, &g->strt.hash[g->sweepstrgc++]); + if (g->sweepstrgc >= g->strt.size) /* nothing more to sweep? */ + g->gcstate = GCSsweep; /* end sweep-string phase */ + lua_assert(old >= g->totalbytes); + g->estimate -= old - g->totalbytes; + return GCSWEEPCOST; + } + case GCSsweep: { + lu_mem old = g->totalbytes; + g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX); + if (*g->sweepgc == NULL) { /* nothing more to sweep? */ + checkSizes(L); + g->gcstate = GCSfinalize; /* end sweep phase */ + } + lua_assert(old >= g->totalbytes); + g->estimate -= old - g->totalbytes; + return GCSWEEPMAX*GCSWEEPCOST; + } + case GCSfinalize: { + if (g->tmudata) { + GCTM(L); + if (g->estimate > GCFINALIZECOST) + g->estimate -= GCFINALIZECOST; + return GCFINALIZECOST; + } + else { + g->gcstate = GCSpause; /* end collection */ + g->gcdept = 0; + return 0; + } + } + default: lua_assert(0); return 0; + } +} + + +void luaC_step (lua_State *L) { + global_State *g = G(L); + l_mem lim = (GCSTEPSIZE/100) * g->gcstepmul; + if (lim == 0) + lim = (MAX_LUMEM-1)/2; /* no limit */ + g->gcdept += g->totalbytes - g->GCthreshold; + do { + lim -= singlestep(L); + if (g->gcstate == GCSpause) + break; + } while (lim > 0); + if (g->gcstate != GCSpause) { + if (g->gcdept < GCSTEPSIZE) + g->GCthreshold = g->totalbytes + GCSTEPSIZE; /* - lim/g->gcstepmul;*/ + else { + g->gcdept -= GCSTEPSIZE; + g->GCthreshold = g->totalbytes; + } + } + else { + setthreshold(g); + } +} + + +void luaC_fullgc (lua_State *L) { + global_State *g = G(L); + if (g->gcstate <= GCSpropagate) { + /* reset sweep marks to sweep all elements (returning them to white) */ + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + /* reset other collector lists */ + g->gray = NULL; + g->grayagain = NULL; + g->weak = NULL; + g->gcstate = GCSsweepstring; + } + lua_assert(g->gcstate != GCSpause && g->gcstate != GCSpropagate); + /* finish any pending sweep phase */ + while (g->gcstate != GCSfinalize) { + lua_assert(g->gcstate == GCSsweepstring || g->gcstate == GCSsweep); + singlestep(L); + } + markroot(L); + while (g->gcstate != GCSpause) { + singlestep(L); + } + setthreshold(g); +} + + +void luaC_barrierf (lua_State *L, GCObject *o, GCObject *v) { + global_State *g = G(L); + lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + lua_assert(ttype(&o->gch) != LUA_TTABLE); + /* must keep invariant? 
*/ + if (g->gcstate == GCSpropagate) + reallymarkobject(g, v); /* restore invariant */ + else /* don't mind */ + makewhite(g, o); /* mark as white just to avoid other barriers */ +} + + +void luaC_barrierback (lua_State *L, Table *t) { + global_State *g = G(L); + GCObject *o = obj2gco(t); + lua_assert(isblack(o) && !isdead(g, o)); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + black2gray(o); /* make table gray (again) */ + t->gclist = g->grayagain; + g->grayagain = o; +} + + +void luaC_link (lua_State *L, GCObject *o, lu_byte tt) { + global_State *g = G(L); + o->gch.next = g->rootgc; + g->rootgc = o; + o->gch.marked = luaC_white(g); + o->gch.tt = tt; +} + + +void luaC_linkupval (lua_State *L, UpVal *uv) { + global_State *g = G(L); + GCObject *o = obj2gco(uv); + o->gch.next = g->rootgc; /* link upvalue into `rootgc' list */ + g->rootgc = o; + if (isgray(o)) { + if (g->gcstate == GCSpropagate) { + gray2black(o); /* closed upvalues need barrier */ + luaC_barrier(L, uv, uv->v); + } + else { /* sweep phase: sweep it (turning it into white) */ + makewhite(g, o); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + } + } +} + diff --git a/deps/lua/src/lgc.h b/deps/lua/src/lgc.h new file mode 100644 index 0000000..5a8dc60 --- /dev/null +++ b/deps/lua/src/lgc.h @@ -0,0 +1,110 @@ +/* +** $Id: lgc.h,v 2.15.1.1 2007/12/27 13:02:25 roberto Exp $ +** Garbage Collector +** See Copyright Notice in lua.h +*/ + +#ifndef lgc_h +#define lgc_h + + +#include "lobject.h" + + +/* +** Possible states of the Garbage Collector +*/ +#define GCSpause 0 +#define GCSpropagate 1 +#define GCSsweepstring 2 +#define GCSsweep 3 +#define GCSfinalize 4 + + +/* +** some userful bit tricks +*/ +#define resetbits(x,m) ((x) &= cast(lu_byte, ~(m))) +#define setbits(x,m) ((x) |= (m)) +#define testbits(x,m) ((x) & (m)) +#define bitmask(b) (1<<(b)) +#define bit2mask(b1,b2) (bitmask(b1) | bitmask(b2)) +#define l_setbit(x,b) setbits(x, bitmask(b)) +#define resetbit(x,b) resetbits(x, bitmask(b)) +#define testbit(x,b) testbits(x, bitmask(b)) +#define set2bits(x,b1,b2) setbits(x, (bit2mask(b1, b2))) +#define reset2bits(x,b1,b2) resetbits(x, (bit2mask(b1, b2))) +#define test2bits(x,b1,b2) testbits(x, (bit2mask(b1, b2))) + + + +/* +** Layout for bit use in `marked' field: +** bit 0 - object is white (type 0) +** bit 1 - object is white (type 1) +** bit 2 - object is black +** bit 3 - for userdata: has been finalized +** bit 3 - for tables: has weak keys +** bit 4 - for tables: has weak values +** bit 5 - object is fixed (should not be collected) +** bit 6 - object is "super" fixed (only the main thread) +*/ + + +#define WHITE0BIT 0 +#define WHITE1BIT 1 +#define BLACKBIT 2 +#define FINALIZEDBIT 3 +#define KEYWEAKBIT 3 +#define VALUEWEAKBIT 4 +#define FIXEDBIT 5 +#define SFIXEDBIT 6 +#define WHITEBITS bit2mask(WHITE0BIT, WHITE1BIT) + + +#define iswhite(x) test2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT) +#define isblack(x) testbit((x)->gch.marked, BLACKBIT) +#define isgray(x) (!isblack(x) && !iswhite(x)) + +#define otherwhite(g) (g->currentwhite ^ WHITEBITS) +#define isdead(g,v) ((v)->gch.marked & otherwhite(g) & WHITEBITS) + +#define changewhite(x) ((x)->gch.marked ^= WHITEBITS) +#define gray2black(x) l_setbit((x)->gch.marked, BLACKBIT) + +#define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) + +#define luaC_white(g) cast(lu_byte, (g)->currentwhite & WHITEBITS) + + +#define luaC_checkGC(L) { \ + condhardstacktests(luaD_reallocstack(L, L->stacksize - EXTRA_STACK - 1)); \ + if (G(L)->totalbytes >= 
G(L)->GCthreshold) \ + luaC_step(L); } + + +#define luaC_barrier(L,p,v) { if (valiswhite(v) && isblack(obj2gco(p))) \ + luaC_barrierf(L,obj2gco(p),gcvalue(v)); } + +#define luaC_barriert(L,t,v) { if (valiswhite(v) && isblack(obj2gco(t))) \ + luaC_barrierback(L,t); } + +#define luaC_objbarrier(L,p,o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ + luaC_barrierf(L,obj2gco(p),obj2gco(o)); } + +#define luaC_objbarriert(L,t,o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) luaC_barrierback(L,t); } + +LUAI_FUNC size_t luaC_separateudata (lua_State *L, int all); +LUAI_FUNC void luaC_callGCTM (lua_State *L); +LUAI_FUNC void luaC_freeall (lua_State *L); +LUAI_FUNC void luaC_step (lua_State *L); +LUAI_FUNC void luaC_fullgc (lua_State *L); +LUAI_FUNC void luaC_link (lua_State *L, GCObject *o, lu_byte tt); +LUAI_FUNC void luaC_linkupval (lua_State *L, UpVal *uv); +LUAI_FUNC void luaC_barrierf (lua_State *L, GCObject *o, GCObject *v); +LUAI_FUNC void luaC_barrierback (lua_State *L, Table *t); + + +#endif diff --git a/deps/lua/src/linit.c b/deps/lua/src/linit.c new file mode 100644 index 0000000..c1f90df --- /dev/null +++ b/deps/lua/src/linit.c @@ -0,0 +1,38 @@ +/* +** $Id: linit.c,v 1.14.1.1 2007/12/27 13:02:25 roberto Exp $ +** Initialization of libraries for lua.c +** See Copyright Notice in lua.h +*/ + + +#define linit_c +#define LUA_LIB + +#include "lua.h" + +#include "lualib.h" +#include "lauxlib.h" + + +static const luaL_Reg lualibs[] = { + {"", luaopen_base}, + {LUA_LOADLIBNAME, luaopen_package}, + {LUA_TABLIBNAME, luaopen_table}, + {LUA_IOLIBNAME, luaopen_io}, + {LUA_OSLIBNAME, luaopen_os}, + {LUA_STRLIBNAME, luaopen_string}, + {LUA_MATHLIBNAME, luaopen_math}, + {LUA_DBLIBNAME, luaopen_debug}, + {NULL, NULL} +}; + + +LUALIB_API void luaL_openlibs (lua_State *L) { + const luaL_Reg *lib = lualibs; + for (; lib->func; lib++) { + lua_pushcfunction(L, lib->func); + lua_pushstring(L, lib->name); + lua_call(L, 1, 0); + } +} + diff --git a/deps/lua/src/liolib.c b/deps/lua/src/liolib.c new file mode 100644 index 0000000..649f9a5 --- /dev/null +++ b/deps/lua/src/liolib.c @@ -0,0 +1,556 @@ +/* +** $Id: liolib.c,v 2.73.1.4 2010/05/14 15:33:51 roberto Exp $ +** Standard I/O (and system) library +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define liolib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +#define IO_INPUT 1 +#define IO_OUTPUT 2 + + +static const char *const fnames[] = {"input", "output"}; + + +static int pushresult (lua_State *L, int i, const char *filename) { + int en = errno; /* calls to Lua API may change this value */ + if (i) { + lua_pushboolean(L, 1); + return 1; + } + else { + lua_pushnil(L); + if (filename) + lua_pushfstring(L, "%s: %s", filename, strerror(en)); + else + lua_pushfstring(L, "%s", strerror(en)); + lua_pushinteger(L, en); + return 3; + } +} + + +static void fileerror (lua_State *L, int arg, const char *filename) { + lua_pushfstring(L, "%s: %s", filename, strerror(errno)); + luaL_argerror(L, arg, lua_tostring(L, -1)); +} + + +#define tofilep(L) ((FILE **)luaL_checkudata(L, 1, LUA_FILEHANDLE)) + + +static int io_type (lua_State *L) { + void *ud; + luaL_checkany(L, 1); + ud = lua_touserdata(L, 1); + lua_getfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); + if (ud == NULL || !lua_getmetatable(L, 1) || !lua_rawequal(L, -2, -1)) + lua_pushnil(L); /* not a file */ + else if (*((FILE **)ud) == NULL) + lua_pushliteral(L, "closed file"); + else + lua_pushliteral(L, "file"); + return 1; +} 
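Editor's note: io_type above classifies a value by comparing its metatable with LUA_FILEHANDLE, returning nil for non-files, "closed file" when the boxed FILE* is NULL, and "file" otherwise. The short host-program sketch below is illustrative only and is not part of the patch; it assumes just the public Lua 5.1 headers shipped in this directory, and the file name "example.txt" is a hypothetical placeholder.

/* Minimal sketch: observe io.type() behaviour from an embedding program. */
#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"

int main(void) {
  lua_State *L = luaL_newstate();      /* new interpreter state */
  luaL_openlibs(L);                    /* loads the io library, among others */
  if (luaL_dostring(L,
        "local f = assert(io.open('example.txt', 'w'))\n"
        "print(io.type(f))   --> file\n"
        "f:close()\n"
        "print(io.type(f))   --> closed file\n"
        "print(io.type(42))  --> nil\n"))
    fprintf(stderr, "%s\n", lua_tostring(L, -1));  /* report any Lua error */
  lua_close(L);
  return 0;
}

Boxing the FILE* in a userdata (see newfile below) is what makes this classification possible: setting the pointer to NULL marks a handle as closed, so __gc and explicit close calls can never double-free the underlying stream.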
+ + +static FILE *tofile (lua_State *L) { + FILE **f = tofilep(L); + if (*f == NULL) + luaL_error(L, "attempt to use a closed file"); + return *f; +} + + + +/* +** When creating file handles, always creates a `closed' file handle +** before opening the actual file; so, if there is a memory error, the +** file is not left opened. +*/ +static FILE **newfile (lua_State *L) { + FILE **pf = (FILE **)lua_newuserdata(L, sizeof(FILE *)); + *pf = NULL; /* file handle is currently `closed' */ + luaL_getmetatable(L, LUA_FILEHANDLE); + lua_setmetatable(L, -2); + return pf; +} + + +/* +** function to (not) close the standard files stdin, stdout, and stderr +*/ +static int io_noclose (lua_State *L) { + lua_pushnil(L); + lua_pushliteral(L, "cannot close standard file"); + return 2; +} + + +/* +** function to close 'popen' files +*/ +static int io_pclose (lua_State *L) { + FILE **p = tofilep(L); + int ok = lua_pclose(L, *p); + *p = NULL; + return pushresult(L, ok, NULL); +} + + +/* +** function to close regular files +*/ +static int io_fclose (lua_State *L) { + FILE **p = tofilep(L); + int ok = (fclose(*p) == 0); + *p = NULL; + return pushresult(L, ok, NULL); +} + + +static int aux_close (lua_State *L) { + lua_getfenv(L, 1); + lua_getfield(L, -1, "__close"); + return (lua_tocfunction(L, -1))(L); +} + + +static int io_close (lua_State *L) { + if (lua_isnone(L, 1)) + lua_rawgeti(L, LUA_ENVIRONINDEX, IO_OUTPUT); + tofile(L); /* make sure argument is a file */ + return aux_close(L); +} + + +static int io_gc (lua_State *L) { + FILE *f = *tofilep(L); + /* ignore closed files */ + if (f != NULL) + aux_close(L); + return 0; +} + + +static int io_tostring (lua_State *L) { + FILE *f = *tofilep(L); + if (f == NULL) + lua_pushliteral(L, "file (closed)"); + else + lua_pushfstring(L, "file (%p)", f); + return 1; +} + + +static int io_open (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + const char *mode = luaL_optstring(L, 2, "r"); + FILE **pf = newfile(L); + *pf = fopen(filename, mode); + return (*pf == NULL) ? pushresult(L, 0, filename) : 1; +} + + +/* +** this function has a separated environment, which defines the +** correct __close for 'popen' files +*/ +static int io_popen (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + const char *mode = luaL_optstring(L, 2, "r"); + FILE **pf = newfile(L); + *pf = lua_popen(L, filename, mode); + return (*pf == NULL) ? pushresult(L, 0, filename) : 1; +} + + +static int io_tmpfile (lua_State *L) { + FILE **pf = newfile(L); + *pf = tmpfile(); + return (*pf == NULL) ? 
pushresult(L, 0, NULL) : 1; +} + + +static FILE *getiofile (lua_State *L, int findex) { + FILE *f; + lua_rawgeti(L, LUA_ENVIRONINDEX, findex); + f = *(FILE **)lua_touserdata(L, -1); + if (f == NULL) + luaL_error(L, "standard %s file is closed", fnames[findex - 1]); + return f; +} + + +static int g_iofile (lua_State *L, int f, const char *mode) { + if (!lua_isnoneornil(L, 1)) { + const char *filename = lua_tostring(L, 1); + if (filename) { + FILE **pf = newfile(L); + *pf = fopen(filename, mode); + if (*pf == NULL) + fileerror(L, 1, filename); + } + else { + tofile(L); /* check that it's a valid file handle */ + lua_pushvalue(L, 1); + } + lua_rawseti(L, LUA_ENVIRONINDEX, f); + } + /* return current value */ + lua_rawgeti(L, LUA_ENVIRONINDEX, f); + return 1; +} + + +static int io_input (lua_State *L) { + return g_iofile(L, IO_INPUT, "r"); +} + + +static int io_output (lua_State *L) { + return g_iofile(L, IO_OUTPUT, "w"); +} + + +static int io_readline (lua_State *L); + + +static void aux_lines (lua_State *L, int idx, int toclose) { + lua_pushvalue(L, idx); + lua_pushboolean(L, toclose); /* close/not close file when finished */ + lua_pushcclosure(L, io_readline, 2); +} + + +static int f_lines (lua_State *L) { + tofile(L); /* check that it's a valid file handle */ + aux_lines(L, 1, 0); + return 1; +} + + +static int io_lines (lua_State *L) { + if (lua_isnoneornil(L, 1)) { /* no arguments? */ + /* will iterate over default input */ + lua_rawgeti(L, LUA_ENVIRONINDEX, IO_INPUT); + return f_lines(L); + } + else { + const char *filename = luaL_checkstring(L, 1); + FILE **pf = newfile(L); + *pf = fopen(filename, "r"); + if (*pf == NULL) + fileerror(L, 1, filename); + aux_lines(L, lua_gettop(L), 1); + return 1; + } +} + + +/* +** {====================================================== +** READ +** ======================================================= +*/ + + +static int read_number (lua_State *L, FILE *f) { + lua_Number d; + if (fscanf(f, LUA_NUMBER_SCAN, &d) == 1) { + lua_pushnumber(L, d); + return 1; + } + else { + lua_pushnil(L); /* "result" to be removed */ + return 0; /* read fails */ + } +} + + +static int test_eof (lua_State *L, FILE *f) { + int c = getc(f); + ungetc(c, f); + lua_pushlstring(L, NULL, 0); + return (c != EOF); +} + + +static int read_line (lua_State *L, FILE *f) { + luaL_Buffer b; + luaL_buffinit(L, &b); + for (;;) { + size_t l; + char *p = luaL_prepbuffer(&b); + if (fgets(p, LUAL_BUFFERSIZE, f) == NULL) { /* eof? 
*/ + luaL_pushresult(&b); /* close buffer */ + return (lua_objlen(L, -1) > 0); /* check whether read something */ + } + l = strlen(p); + if (l == 0 || p[l-1] != '\n') + luaL_addsize(&b, l); + else { + luaL_addsize(&b, l - 1); /* do not include `eol' */ + luaL_pushresult(&b); /* close buffer */ + return 1; /* read at least an `eol' */ + } + } +} + + +static int read_chars (lua_State *L, FILE *f, size_t n) { + size_t rlen; /* how much to read */ + size_t nr; /* number of chars actually read */ + luaL_Buffer b; + luaL_buffinit(L, &b); + rlen = LUAL_BUFFERSIZE; /* try to read that much each time */ + do { + char *p = luaL_prepbuffer(&b); + if (rlen > n) rlen = n; /* cannot read more than asked */ + nr = fread(p, sizeof(char), rlen, f); + luaL_addsize(&b, nr); + n -= nr; /* still have to read `n' chars */ + } while (n > 0 && nr == rlen); /* until end of count or eof */ + luaL_pushresult(&b); /* close buffer */ + return (n == 0 || lua_objlen(L, -1) > 0); +} + + +static int g_read (lua_State *L, FILE *f, int first) { + int nargs = lua_gettop(L) - 1; + int success; + int n; + clearerr(f); + if (nargs == 0) { /* no arguments? */ + success = read_line(L, f); + n = first+1; /* to return 1 result */ + } + else { /* ensure stack space for all results and for auxlib's buffer */ + luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); + success = 1; + for (n = first; nargs-- && success; n++) { + if (lua_type(L, n) == LUA_TNUMBER) { + size_t l = (size_t)lua_tointeger(L, n); + success = (l == 0) ? test_eof(L, f) : read_chars(L, f, l); + } + else { + const char *p = lua_tostring(L, n); + luaL_argcheck(L, p && p[0] == '*', n, "invalid option"); + switch (p[1]) { + case 'n': /* number */ + success = read_number(L, f); + break; + case 'l': /* line */ + success = read_line(L, f); + break; + case 'a': /* file */ + read_chars(L, f, ~((size_t)0)); /* read MAX_SIZE_T chars */ + success = 1; /* always success */ + break; + default: + return luaL_argerror(L, n, "invalid format"); + } + } + } + } + if (ferror(f)) + return pushresult(L, 0, NULL); + if (!success) { + lua_pop(L, 1); /* remove last result */ + lua_pushnil(L); /* push nil instead */ + } + return n - first; +} + + +static int io_read (lua_State *L) { + return g_read(L, getiofile(L, IO_INPUT), 1); +} + + +static int f_read (lua_State *L) { + return g_read(L, tofile(L), 2); +} + + +static int io_readline (lua_State *L) { + FILE *f = *(FILE **)lua_touserdata(L, lua_upvalueindex(1)); + int sucess; + if (f == NULL) /* file is already closed? */ + luaL_error(L, "file is already closed"); + sucess = read_line(L, f); + if (ferror(f)) + return luaL_error(L, "%s", strerror(errno)); + if (sucess) return 1; + else { /* EOF */ + if (lua_toboolean(L, lua_upvalueindex(2))) { /* generator created file? 
*/ + lua_settop(L, 0); + lua_pushvalue(L, lua_upvalueindex(1)); + aux_close(L); /* close it */ + } + return 0; + } +} + +/* }====================================================== */ + + +static int g_write (lua_State *L, FILE *f, int arg) { + int nargs = lua_gettop(L) - 1; + int status = 1; + for (; nargs--; arg++) { + if (lua_type(L, arg) == LUA_TNUMBER) { + /* optimization: could be done exactly as for strings */ + status = status && + fprintf(f, LUA_NUMBER_FMT, lua_tonumber(L, arg)) > 0; + } + else { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + status = status && (fwrite(s, sizeof(char), l, f) == l); + } + } + return pushresult(L, status, NULL); +} + + +static int io_write (lua_State *L) { + return g_write(L, getiofile(L, IO_OUTPUT), 1); +} + + +static int f_write (lua_State *L) { + return g_write(L, tofile(L), 2); +} + + +static int f_seek (lua_State *L) { + static const int mode[] = {SEEK_SET, SEEK_CUR, SEEK_END}; + static const char *const modenames[] = {"set", "cur", "end", NULL}; + FILE *f = tofile(L); + int op = luaL_checkoption(L, 2, "cur", modenames); + long offset = luaL_optlong(L, 3, 0); + op = fseek(f, offset, mode[op]); + if (op) + return pushresult(L, 0, NULL); /* error */ + else { + lua_pushinteger(L, ftell(f)); + return 1; + } +} + + +static int f_setvbuf (lua_State *L) { + static const int mode[] = {_IONBF, _IOFBF, _IOLBF}; + static const char *const modenames[] = {"no", "full", "line", NULL}; + FILE *f = tofile(L); + int op = luaL_checkoption(L, 2, NULL, modenames); + lua_Integer sz = luaL_optinteger(L, 3, LUAL_BUFFERSIZE); + int res = setvbuf(f, NULL, mode[op], sz); + return pushresult(L, res == 0, NULL); +} + + + +static int io_flush (lua_State *L) { + return pushresult(L, fflush(getiofile(L, IO_OUTPUT)) == 0, NULL); +} + + +static int f_flush (lua_State *L) { + return pushresult(L, fflush(tofile(L)) == 0, NULL); +} + + +static const luaL_Reg iolib[] = { + {"close", io_close}, + {"flush", io_flush}, + {"input", io_input}, + {"lines", io_lines}, + {"open", io_open}, + {"output", io_output}, + {"popen", io_popen}, + {"read", io_read}, + {"tmpfile", io_tmpfile}, + {"type", io_type}, + {"write", io_write}, + {NULL, NULL} +}; + + +static const luaL_Reg flib[] = { + {"close", io_close}, + {"flush", f_flush}, + {"lines", f_lines}, + {"read", f_read}, + {"seek", f_seek}, + {"setvbuf", f_setvbuf}, + {"write", f_write}, + {"__gc", io_gc}, + {"__tostring", io_tostring}, + {NULL, NULL} +}; + + +static void createmeta (lua_State *L) { + luaL_newmetatable(L, LUA_FILEHANDLE); /* create metatable for file handles */ + lua_pushvalue(L, -1); /* push metatable */ + lua_setfield(L, -2, "__index"); /* metatable.__index = metatable */ + luaL_register(L, NULL, flib); /* file methods */ +} + + +static void createstdfile (lua_State *L, FILE *f, int k, const char *fname) { + *newfile(L) = f; + if (k > 0) { + lua_pushvalue(L, -1); + lua_rawseti(L, LUA_ENVIRONINDEX, k); + } + lua_pushvalue(L, -2); /* copy environment */ + lua_setfenv(L, -2); /* set it */ + lua_setfield(L, -3, fname); +} + + +static void newfenv (lua_State *L, lua_CFunction cls) { + lua_createtable(L, 0, 1); + lua_pushcfunction(L, cls); + lua_setfield(L, -2, "__close"); +} + + +LUALIB_API int luaopen_io (lua_State *L) { + createmeta(L); + /* create (private) environment (with fields IO_INPUT, IO_OUTPUT, __close) */ + newfenv(L, io_fclose); + lua_replace(L, LUA_ENVIRONINDEX); + /* open library */ + luaL_register(L, LUA_IOLIBNAME, iolib); + /* create (and set) default files */ + newfenv(L, io_noclose); /* close 
function for default files */ + createstdfile(L, stdin, IO_INPUT, "stdin"); + createstdfile(L, stdout, IO_OUTPUT, "stdout"); + createstdfile(L, stderr, 0, "stderr"); + lua_pop(L, 1); /* pop environment for default files */ + lua_getfield(L, -1, "popen"); + newfenv(L, io_pclose); /* create environment for 'popen' */ + lua_setfenv(L, -2); /* set fenv for 'popen' */ + lua_pop(L, 1); /* pop 'popen' */ + return 1; +} + diff --git a/deps/lua/src/llex.c b/deps/lua/src/llex.c new file mode 100644 index 0000000..88c6790 --- /dev/null +++ b/deps/lua/src/llex.c @@ -0,0 +1,463 @@ +/* +** $Id: llex.c,v 2.20.1.2 2009/11/23 14:58:22 roberto Exp $ +** Lexical Analyzer +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define llex_c +#define LUA_CORE + +#include "lua.h" + +#include "ldo.h" +#include "llex.h" +#include "lobject.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "lzio.h" + + + +#define next(ls) (ls->current = zgetc(ls->z)) + + + + +#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r') + + +/* ORDER RESERVED */ +const char *const luaX_tokens [] = { + "and", "break", "do", "else", "elseif", + "end", "false", "for", "function", "if", + "in", "local", "nil", "not", "or", "repeat", + "return", "then", "true", "until", "while", + "..", "...", "==", ">=", "<=", "~=", + "", "", "", "", + NULL +}; + + +#define save_and_next(ls) (save(ls, ls->current), next(ls)) + + +static void save (LexState *ls, int c) { + Mbuffer *b = ls->buff; + if (b->n + 1 > b->buffsize) { + size_t newsize; + if (b->buffsize >= MAX_SIZET/2) + luaX_lexerror(ls, "lexical element too long", 0); + newsize = b->buffsize * 2; + luaZ_resizebuffer(ls->L, b, newsize); + } + b->buffer[b->n++] = cast(char, c); +} + + +void luaX_init (lua_State *L) { + int i; + for (i=0; itsv.reserved = cast_byte(i+1); /* reserved word */ + } +} + + +#define MAXSRC 80 + + +const char *luaX_token2str (LexState *ls, int token) { + if (token < FIRST_RESERVED) { + lua_assert(token == cast(unsigned char, token)); + return (iscntrl(token)) ? 
luaO_pushfstring(ls->L, "char(%d)", token) : + luaO_pushfstring(ls->L, "%c", token); + } + else + return luaX_tokens[token-FIRST_RESERVED]; +} + + +static const char *txtToken (LexState *ls, int token) { + switch (token) { + case TK_NAME: + case TK_STRING: + case TK_NUMBER: + save(ls, '\0'); + return luaZ_buffer(ls->buff); + default: + return luaX_token2str(ls, token); + } +} + + +void luaX_lexerror (LexState *ls, const char *msg, int token) { + char buff[MAXSRC]; + luaO_chunkid(buff, getstr(ls->source), MAXSRC); + msg = luaO_pushfstring(ls->L, "%s:%d: %s", buff, ls->linenumber, msg); + if (token) + luaO_pushfstring(ls->L, "%s near " LUA_QS, msg, txtToken(ls, token)); + luaD_throw(ls->L, LUA_ERRSYNTAX); +} + + +void luaX_syntaxerror (LexState *ls, const char *msg) { + luaX_lexerror(ls, msg, ls->t.token); +} + + +TString *luaX_newstring (LexState *ls, const char *str, size_t l) { + lua_State *L = ls->L; + TString *ts = luaS_newlstr(L, str, l); + TValue *o = luaH_setstr(L, ls->fs->h, ts); /* entry for `str' */ + if (ttisnil(o)) { + setbvalue(o, 1); /* make sure `str' will not be collected */ + luaC_checkGC(L); + } + return ts; +} + + +static void inclinenumber (LexState *ls) { + int old = ls->current; + lua_assert(currIsNewline(ls)); + next(ls); /* skip `\n' or `\r' */ + if (currIsNewline(ls) && ls->current != old) + next(ls); /* skip `\n\r' or `\r\n' */ + if (++ls->linenumber >= MAX_INT) + luaX_syntaxerror(ls, "chunk has too many lines"); +} + + +void luaX_setinput (lua_State *L, LexState *ls, ZIO *z, TString *source) { + ls->decpoint = '.'; + ls->L = L; + ls->lookahead.token = TK_EOS; /* no look-ahead token */ + ls->z = z; + ls->fs = NULL; + ls->linenumber = 1; + ls->lastline = 1; + ls->source = source; + luaZ_resizebuffer(ls->L, ls->buff, LUA_MINBUFFER); /* initialize buffer */ + next(ls); /* read first char */ +} + + + +/* +** ======================================================= +** LEXICAL ANALYZER +** ======================================================= +*/ + + + +static int check_next (LexState *ls, const char *set) { + if (!strchr(set, ls->current)) + return 0; + save_and_next(ls); + return 1; +} + + +static void buffreplace (LexState *ls, char from, char to) { + size_t n = luaZ_bufflen(ls->buff); + char *p = luaZ_buffer(ls->buff); + while (n--) + if (p[n] == from) p[n] = to; +} + + +static void trydecpoint (LexState *ls, SemInfo *seminfo) { + /* format error: try to update decimal point separator */ + struct lconv *cv = localeconv(); + char old = ls->decpoint; + ls->decpoint = (cv ? cv->decimal_point[0] : '.'); + buffreplace(ls, old, ls->decpoint); /* try updated decimal separator */ + if (!luaO_str2d(luaZ_buffer(ls->buff), &seminfo->r)) { + /* format error with correct decimal point: no more options */ + buffreplace(ls, ls->decpoint, '.'); /* undo change (for error message) */ + luaX_lexerror(ls, "malformed number", TK_NUMBER); + } +} + + +/* LUA_NUMBER */ +static void read_numeral (LexState *ls, SemInfo *seminfo) { + lua_assert(isdigit(ls->current)); + do { + save_and_next(ls); + } while (isdigit(ls->current) || ls->current == '.'); + if (check_next(ls, "Ee")) /* `E'? */ + check_next(ls, "+-"); /* optional exponent sign */ + while (isalnum(ls->current) || ls->current == '_') + save_and_next(ls); + save(ls, '\0'); + buffreplace(ls, '.', ls->decpoint); /* follow locale for decimal point */ + if (!luaO_str2d(luaZ_buffer(ls->buff), &seminfo->r)) /* format error? 
*/ + trydecpoint(ls, seminfo); /* try to update decimal point separator */ +} + + +static int skip_sep (LexState *ls) { + int count = 0; + int s = ls->current; + lua_assert(s == '[' || s == ']'); + save_and_next(ls); + while (ls->current == '=') { + save_and_next(ls); + count++; + } + return (ls->current == s) ? count : (-count) - 1; +} + + +static void read_long_string (LexState *ls, SemInfo *seminfo, int sep) { + int cont = 0; + (void)(cont); /* avoid warnings when `cont' is not used */ + save_and_next(ls); /* skip 2nd `[' */ + if (currIsNewline(ls)) /* string starts with a newline? */ + inclinenumber(ls); /* skip it */ + for (;;) { + switch (ls->current) { + case EOZ: + luaX_lexerror(ls, (seminfo) ? "unfinished long string" : + "unfinished long comment", TK_EOS); + break; /* to avoid warnings */ +#if defined(LUA_COMPAT_LSTR) + case '[': { + if (skip_sep(ls) == sep) { + save_and_next(ls); /* skip 2nd `[' */ + cont++; +#if LUA_COMPAT_LSTR == 1 + if (sep == 0) + luaX_lexerror(ls, "nesting of [[...]] is deprecated", '['); +#endif + } + break; + } +#endif + case ']': { + if (skip_sep(ls) == sep) { + save_and_next(ls); /* skip 2nd `]' */ +#if defined(LUA_COMPAT_LSTR) && LUA_COMPAT_LSTR == 2 + cont--; + if (sep == 0 && cont >= 0) break; +#endif + goto endloop; + } + break; + } + case '\n': + case '\r': { + save(ls, '\n'); + inclinenumber(ls); + if (!seminfo) luaZ_resetbuffer(ls->buff); /* avoid wasting space */ + break; + } + default: { + if (seminfo) save_and_next(ls); + else next(ls); + } + } + } endloop: + if (seminfo) + seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + (2 + sep), + luaZ_bufflen(ls->buff) - 2*(2 + sep)); +} + + +static void read_string (LexState *ls, int del, SemInfo *seminfo) { + save_and_next(ls); + while (ls->current != del) { + switch (ls->current) { + case EOZ: + luaX_lexerror(ls, "unfinished string", TK_EOS); + continue; /* to avoid warnings */ + case '\n': + case '\r': + luaX_lexerror(ls, "unfinished string", TK_STRING); + continue; /* to avoid warnings */ + case '\\': { + int c; + next(ls); /* do not save the `\' */ + switch (ls->current) { + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case '\n': /* go through */ + case '\r': save(ls, '\n'); inclinenumber(ls); continue; + case EOZ: continue; /* will raise an error next loop */ + default: { + if (!isdigit(ls->current)) + save_and_next(ls); /* handles \\, \", \', and \? 
*/ + else { /* \xxx */ + int i = 0; + c = 0; + do { + c = 10*c + (ls->current-'0'); + next(ls); + } while (++i<3 && isdigit(ls->current)); + if (c > UCHAR_MAX) + luaX_lexerror(ls, "escape sequence too large", TK_STRING); + save(ls, c); + } + continue; + } + } + save(ls, c); + next(ls); + continue; + } + default: + save_and_next(ls); + } + } + save_and_next(ls); /* skip delimiter */ + seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + 1, + luaZ_bufflen(ls->buff) - 2); +} + + +static int llex (LexState *ls, SemInfo *seminfo) { + luaZ_resetbuffer(ls->buff); + for (;;) { + switch (ls->current) { + case '\n': + case '\r': { + inclinenumber(ls); + continue; + } + case '-': { + next(ls); + if (ls->current != '-') return '-'; + /* else is a comment */ + next(ls); + if (ls->current == '[') { + int sep = skip_sep(ls); + luaZ_resetbuffer(ls->buff); /* `skip_sep' may dirty the buffer */ + if (sep >= 0) { + read_long_string(ls, NULL, sep); /* long comment */ + luaZ_resetbuffer(ls->buff); + continue; + } + } + /* else short comment */ + while (!currIsNewline(ls) && ls->current != EOZ) + next(ls); + continue; + } + case '[': { + int sep = skip_sep(ls); + if (sep >= 0) { + read_long_string(ls, seminfo, sep); + return TK_STRING; + } + else if (sep == -1) return '['; + else luaX_lexerror(ls, "invalid long string delimiter", TK_STRING); + } + case '=': { + next(ls); + if (ls->current != '=') return '='; + else { next(ls); return TK_EQ; } + } + case '<': { + next(ls); + if (ls->current != '=') return '<'; + else { next(ls); return TK_LE; } + } + case '>': { + next(ls); + if (ls->current != '=') return '>'; + else { next(ls); return TK_GE; } + } + case '~': { + next(ls); + if (ls->current != '=') return '~'; + else { next(ls); return TK_NE; } + } + case '"': + case '\'': { + read_string(ls, ls->current, seminfo); + return TK_STRING; + } + case '.': { + save_and_next(ls); + if (check_next(ls, ".")) { + if (check_next(ls, ".")) + return TK_DOTS; /* ... */ + else return TK_CONCAT; /* .. */ + } + else if (!isdigit(ls->current)) return '.'; + else { + read_numeral(ls, seminfo); + return TK_NUMBER; + } + } + case EOZ: { + return TK_EOS; + } + default: { + if (isspace(ls->current)) { + lua_assert(!currIsNewline(ls)); + next(ls); + continue; + } + else if (isdigit(ls->current)) { + read_numeral(ls, seminfo); + return TK_NUMBER; + } + else if (isalpha(ls->current) || ls->current == '_') { + /* identifier or reserved word */ + TString *ts; + do { + save_and_next(ls); + } while (isalnum(ls->current) || ls->current == '_'); + ts = luaX_newstring(ls, luaZ_buffer(ls->buff), + luaZ_bufflen(ls->buff)); + if (ts->tsv.reserved > 0) /* reserved word? */ + return ts->tsv.reserved - 1 + FIRST_RESERVED; + else { + seminfo->ts = ts; + return TK_NAME; + } + } + else { + int c = ls->current; + next(ls); + return c; /* single-char tokens (+ - / ...) */ + } + } + } + } +} + + +void luaX_next (LexState *ls) { + ls->lastline = ls->linenumber; + if (ls->lookahead.token != TK_EOS) { /* is there a look-ahead token? 
*/ + ls->t = ls->lookahead; /* use this one */ + ls->lookahead.token = TK_EOS; /* and discharge it */ + } + else + ls->t.token = llex(ls, &ls->t.seminfo); /* read next token */ +} + + +void luaX_lookahead (LexState *ls) { + lua_assert(ls->lookahead.token == TK_EOS); + ls->lookahead.token = llex(ls, &ls->lookahead.seminfo); +} + diff --git a/deps/lua/src/llex.h b/deps/lua/src/llex.h new file mode 100644 index 0000000..a9201ce --- /dev/null +++ b/deps/lua/src/llex.h @@ -0,0 +1,81 @@ +/* +** $Id: llex.h,v 1.58.1.1 2007/12/27 13:02:25 roberto Exp $ +** Lexical Analyzer +** See Copyright Notice in lua.h +*/ + +#ifndef llex_h +#define llex_h + +#include "lobject.h" +#include "lzio.h" + + +#define FIRST_RESERVED 257 + +/* maximum length of a reserved word */ +#define TOKEN_LEN (sizeof("function")/sizeof(char)) + + +/* +* WARNING: if you change the order of this enumeration, +* grep "ORDER RESERVED" +*/ +enum RESERVED { + /* terminal symbols denoted by reserved words */ + TK_AND = FIRST_RESERVED, TK_BREAK, + TK_DO, TK_ELSE, TK_ELSEIF, TK_END, TK_FALSE, TK_FOR, TK_FUNCTION, + TK_IF, TK_IN, TK_LOCAL, TK_NIL, TK_NOT, TK_OR, TK_REPEAT, + TK_RETURN, TK_THEN, TK_TRUE, TK_UNTIL, TK_WHILE, + /* other terminal symbols */ + TK_CONCAT, TK_DOTS, TK_EQ, TK_GE, TK_LE, TK_NE, TK_NUMBER, + TK_NAME, TK_STRING, TK_EOS +}; + +/* number of reserved words */ +#define NUM_RESERVED (cast(int, TK_WHILE-FIRST_RESERVED+1)) + + +/* array with token `names' */ +LUAI_DATA const char *const luaX_tokens []; + + +typedef union { + lua_Number r; + TString *ts; +} SemInfo; /* semantics information */ + + +typedef struct Token { + int token; + SemInfo seminfo; +} Token; + + +typedef struct LexState { + int current; /* current character (charint) */ + int linenumber; /* input line counter */ + int lastline; /* line of last token `consumed' */ + Token t; /* current token */ + Token lookahead; /* look ahead token */ + struct FuncState *fs; /* `FuncState' is private to the parser */ + struct lua_State *L; + ZIO *z; /* input stream */ + Mbuffer *buff; /* buffer for tokens */ + TString *source; /* current source name */ + char decpoint; /* locale decimal point */ +} LexState; + + +LUAI_FUNC void luaX_init (lua_State *L); +LUAI_FUNC void luaX_setinput (lua_State *L, LexState *ls, ZIO *z, + TString *source); +LUAI_FUNC TString *luaX_newstring (LexState *ls, const char *str, size_t l); +LUAI_FUNC void luaX_next (LexState *ls); +LUAI_FUNC void luaX_lookahead (LexState *ls); +LUAI_FUNC void luaX_lexerror (LexState *ls, const char *msg, int token); +LUAI_FUNC void luaX_syntaxerror (LexState *ls, const char *s); +LUAI_FUNC const char *luaX_token2str (LexState *ls, int token); + + +#endif diff --git a/deps/lua/src/llimits.h b/deps/lua/src/llimits.h new file mode 100644 index 0000000..ca8dcb7 --- /dev/null +++ b/deps/lua/src/llimits.h @@ -0,0 +1,128 @@ +/* +** $Id: llimits.h,v 1.69.1.1 2007/12/27 13:02:25 roberto Exp $ +** Limits, basic types, and some other `installation-dependent' definitions +** See Copyright Notice in lua.h +*/ + +#ifndef llimits_h +#define llimits_h + + +#include +#include + + +#include "lua.h" + + +typedef LUAI_UINT32 lu_int32; + +typedef LUAI_UMEM lu_mem; + +typedef LUAI_MEM l_mem; + + + +/* chars used as small naturals (so that `char' is reserved for characters) */ +typedef unsigned char lu_byte; + + +#define MAX_SIZET ((size_t)(~(size_t)0)-2) + +#define MAX_LUMEM ((lu_mem)(~(lu_mem)0)-2) + + +#define MAX_INT (INT_MAX-2) /* maximum value of an int (-2 for safety) */ + +/* +** conversion of pointer to integer +** this is 
for hashing only; there is no problem if the integer +** cannot hold the whole pointer value +*/ +#define IntPoint(p) ((unsigned int)(lu_mem)(p)) + + + +/* type to ensure maximum alignment */ +typedef LUAI_USER_ALIGNMENT_T L_Umaxalign; + + +/* result of a `usual argument conversion' over lua_Number */ +typedef LUAI_UACNUMBER l_uacNumber; + + +/* internal assertions for in-house debugging */ +#ifdef lua_assert + +#define check_exp(c,e) (lua_assert(c), (e)) +#define api_check(l,e) lua_assert(e) + +#else + +#define lua_assert(c) ((void)0) +#define check_exp(c,e) (e) +#define api_check luai_apicheck + +#endif + + +#ifndef UNUSED +#define UNUSED(x) ((void)(x)) /* to avoid warnings */ +#endif + + +#ifndef cast +#define cast(t, exp) ((t)(exp)) +#endif + +#define cast_byte(i) cast(lu_byte, (i)) +#define cast_num(i) cast(lua_Number, (i)) +#define cast_int(i) cast(int, (i)) + + + +/* +** type for virtual-machine instructions +** must be an unsigned with (at least) 4 bytes (see details in lopcodes.h) +*/ +typedef lu_int32 Instruction; + + + +/* maximum stack for a Lua function */ +#define MAXSTACK 250 + + + +/* minimum size for the string table (must be power of 2) */ +#ifndef MINSTRTABSIZE +#define MINSTRTABSIZE 32 +#endif + + +/* minimum size for string buffer */ +#ifndef LUA_MINBUFFER +#define LUA_MINBUFFER 32 +#endif + + +#ifndef lua_lock +#define lua_lock(L) ((void) 0) +#define lua_unlock(L) ((void) 0) +#endif + +#ifndef luai_threadyield +#define luai_threadyield(L) {lua_unlock(L); lua_lock(L);} +#endif + + +/* +** macro to control inclusion of some hard tests on stack reallocation +*/ +#ifndef HARDSTACKTESTS +#define condhardstacktests(x) ((void)0) +#else +#define condhardstacktests(x) x +#endif + +#endif diff --git a/deps/lua/src/lmathlib.c b/deps/lua/src/lmathlib.c new file mode 100644 index 0000000..441fbf7 --- /dev/null +++ b/deps/lua/src/lmathlib.c @@ -0,0 +1,263 @@ +/* +** $Id: lmathlib.c,v 1.67.1.1 2007/12/27 13:02:25 roberto Exp $ +** Standard mathematical library +** See Copyright Notice in lua.h +*/ + + +#include +#include + +#define lmathlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +#undef PI +#define PI (3.14159265358979323846) +#define RADIANS_PER_DEGREE (PI/180.0) + + + +static int math_abs (lua_State *L) { + lua_pushnumber(L, fabs(luaL_checknumber(L, 1))); + return 1; +} + +static int math_sin (lua_State *L) { + lua_pushnumber(L, sin(luaL_checknumber(L, 1))); + return 1; +} + +static int math_sinh (lua_State *L) { + lua_pushnumber(L, sinh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_cos (lua_State *L) { + lua_pushnumber(L, cos(luaL_checknumber(L, 1))); + return 1; +} + +static int math_cosh (lua_State *L) { + lua_pushnumber(L, cosh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_tan (lua_State *L) { + lua_pushnumber(L, tan(luaL_checknumber(L, 1))); + return 1; +} + +static int math_tanh (lua_State *L) { + lua_pushnumber(L, tanh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_asin (lua_State *L) { + lua_pushnumber(L, asin(luaL_checknumber(L, 1))); + return 1; +} + +static int math_acos (lua_State *L) { + lua_pushnumber(L, acos(luaL_checknumber(L, 1))); + return 1; +} + +static int math_atan (lua_State *L) { + lua_pushnumber(L, atan(luaL_checknumber(L, 1))); + return 1; +} + +static int math_atan2 (lua_State *L) { + lua_pushnumber(L, atan2(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_ceil (lua_State *L) { + lua_pushnumber(L, ceil(luaL_checknumber(L, 1))); + 
return 1; +} + +static int math_floor (lua_State *L) { + lua_pushnumber(L, floor(luaL_checknumber(L, 1))); + return 1; +} + +static int math_fmod (lua_State *L) { + lua_pushnumber(L, fmod(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_modf (lua_State *L) { + double ip; + double fp = modf(luaL_checknumber(L, 1), &ip); + lua_pushnumber(L, ip); + lua_pushnumber(L, fp); + return 2; +} + +static int math_sqrt (lua_State *L) { + lua_pushnumber(L, sqrt(luaL_checknumber(L, 1))); + return 1; +} + +static int math_pow (lua_State *L) { + lua_pushnumber(L, pow(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_log (lua_State *L) { + lua_pushnumber(L, log(luaL_checknumber(L, 1))); + return 1; +} + +static int math_log10 (lua_State *L) { + lua_pushnumber(L, log10(luaL_checknumber(L, 1))); + return 1; +} + +static int math_exp (lua_State *L) { + lua_pushnumber(L, exp(luaL_checknumber(L, 1))); + return 1; +} + +static int math_deg (lua_State *L) { + lua_pushnumber(L, luaL_checknumber(L, 1)/RADIANS_PER_DEGREE); + return 1; +} + +static int math_rad (lua_State *L) { + lua_pushnumber(L, luaL_checknumber(L, 1)*RADIANS_PER_DEGREE); + return 1; +} + +static int math_frexp (lua_State *L) { + int e; + lua_pushnumber(L, frexp(luaL_checknumber(L, 1), &e)); + lua_pushinteger(L, e); + return 2; +} + +static int math_ldexp (lua_State *L) { + lua_pushnumber(L, ldexp(luaL_checknumber(L, 1), luaL_checkint(L, 2))); + return 1; +} + + + +static int math_min (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + lua_Number dmin = luaL_checknumber(L, 1); + int i; + for (i=2; i<=n; i++) { + lua_Number d = luaL_checknumber(L, i); + if (d < dmin) + dmin = d; + } + lua_pushnumber(L, dmin); + return 1; +} + + +static int math_max (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + lua_Number dmax = luaL_checknumber(L, 1); + int i; + for (i=2; i<=n; i++) { + lua_Number d = luaL_checknumber(L, i); + if (d > dmax) + dmax = d; + } + lua_pushnumber(L, dmax); + return 1; +} + + +static int math_random (lua_State *L) { + /* the `%' avoids the (rare) case of r==1, and is needed also because on + some systems (SunOS!) 
`rand()' may return a value larger than RAND_MAX */ + lua_Number r = (lua_Number)(rand()%RAND_MAX) / (lua_Number)RAND_MAX; + switch (lua_gettop(L)) { /* check number of arguments */ + case 0: { /* no arguments */ + lua_pushnumber(L, r); /* Number between 0 and 1 */ + break; + } + case 1: { /* only upper limit */ + int u = luaL_checkint(L, 1); + luaL_argcheck(L, 1<=u, 1, "interval is empty"); + lua_pushnumber(L, floor(r*u)+1); /* int between 1 and `u' */ + break; + } + case 2: { /* lower and upper limits */ + int l = luaL_checkint(L, 1); + int u = luaL_checkint(L, 2); + luaL_argcheck(L, l<=u, 2, "interval is empty"); + lua_pushnumber(L, floor(r*(u-l+1))+l); /* int between `l' and `u' */ + break; + } + default: return luaL_error(L, "wrong number of arguments"); + } + return 1; +} + + +static int math_randomseed (lua_State *L) { + srand(luaL_checkint(L, 1)); + return 0; +} + + +static const luaL_Reg mathlib[] = { + {"abs", math_abs}, + {"acos", math_acos}, + {"asin", math_asin}, + {"atan2", math_atan2}, + {"atan", math_atan}, + {"ceil", math_ceil}, + {"cosh", math_cosh}, + {"cos", math_cos}, + {"deg", math_deg}, + {"exp", math_exp}, + {"floor", math_floor}, + {"fmod", math_fmod}, + {"frexp", math_frexp}, + {"ldexp", math_ldexp}, + {"log10", math_log10}, + {"log", math_log}, + {"max", math_max}, + {"min", math_min}, + {"modf", math_modf}, + {"pow", math_pow}, + {"rad", math_rad}, + {"random", math_random}, + {"randomseed", math_randomseed}, + {"sinh", math_sinh}, + {"sin", math_sin}, + {"sqrt", math_sqrt}, + {"tanh", math_tanh}, + {"tan", math_tan}, + {NULL, NULL} +}; + + +/* +** Open math library +*/ +LUALIB_API int luaopen_math (lua_State *L) { + luaL_register(L, LUA_MATHLIBNAME, mathlib); + lua_pushnumber(L, PI); + lua_setfield(L, -2, "pi"); + lua_pushnumber(L, HUGE_VAL); + lua_setfield(L, -2, "huge"); +#if defined(LUA_COMPAT_MOD) + lua_getfield(L, -1, "fmod"); + lua_setfield(L, -2, "mod"); +#endif + return 1; +} + diff --git a/deps/lua/src/lmem.c b/deps/lua/src/lmem.c new file mode 100644 index 0000000..ae7d8c9 --- /dev/null +++ b/deps/lua/src/lmem.c @@ -0,0 +1,86 @@ +/* +** $Id: lmem.c,v 1.70.1.1 2007/12/27 13:02:25 roberto Exp $ +** Interface to Memory Manager +** See Copyright Notice in lua.h +*/ + + +#include + +#define lmem_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" + + + +/* +** About the realloc function: +** void * frealloc (void *ud, void *ptr, size_t osize, size_t nsize); +** (`osize' is the old size, `nsize' is the new size) +** +** Lua ensures that (ptr == NULL) iff (osize == 0). +** +** * frealloc(ud, NULL, 0, x) creates a new block of size `x' +** +** * frealloc(ud, p, x, 0) frees the block `p' +** (in this specific case, frealloc must return NULL). +** particularly, frealloc(ud, NULL, 0, 0) does nothing +** (which is equivalent to free(NULL) in ANSI C) +** +** frealloc returns NULL if it cannot create or reallocate the area +** (any reallocation to an equal or smaller size cannot fail!) +*/ + + + +#define MINSIZEARRAY 4 + + +void *luaM_growaux_ (lua_State *L, void *block, int *size, size_t size_elems, + int limit, const char *errormsg) { + void *newblock; + int newsize; + if (*size >= limit/2) { /* cannot double it? */ + if (*size >= limit) /* cannot grow even a little? 
*/ + luaG_runerror(L, errormsg); + newsize = limit; /* still have at least one free place */ + } + else { + newsize = (*size)*2; + if (newsize < MINSIZEARRAY) + newsize = MINSIZEARRAY; /* minimum size */ + } + newblock = luaM_reallocv(L, block, *size, newsize, size_elems); + *size = newsize; /* update only when everything else is OK */ + return newblock; +} + + +void *luaM_toobig (lua_State *L) { + luaG_runerror(L, "memory allocation error: block too big"); + return NULL; /* to avoid warnings */ +} + + + +/* +** generic allocation routine. +*/ +void *luaM_realloc_ (lua_State *L, void *block, size_t osize, size_t nsize) { + global_State *g = G(L); + lua_assert((osize == 0) == (block == NULL)); + block = (*g->frealloc)(g->ud, block, osize, nsize); + if (block == NULL && nsize > 0) + luaD_throw(L, LUA_ERRMEM); + lua_assert((nsize == 0) == (block == NULL)); + g->totalbytes = (g->totalbytes - osize) + nsize; + return block; +} + diff --git a/deps/lua/src/lmem.h b/deps/lua/src/lmem.h new file mode 100644 index 0000000..7c2dcb3 --- /dev/null +++ b/deps/lua/src/lmem.h @@ -0,0 +1,49 @@ +/* +** $Id: lmem.h,v 1.31.1.1 2007/12/27 13:02:25 roberto Exp $ +** Interface to Memory Manager +** See Copyright Notice in lua.h +*/ + +#ifndef lmem_h +#define lmem_h + + +#include + +#include "llimits.h" +#include "lua.h" + +#define MEMERRMSG "not enough memory" + + +#define luaM_reallocv(L,b,on,n,e) \ + ((cast(size_t, (n)+1) <= MAX_SIZET/(e)) ? /* +1 to avoid warnings */ \ + luaM_realloc_(L, (b), (on)*(e), (n)*(e)) : \ + luaM_toobig(L)) + +#define luaM_freemem(L, b, s) luaM_realloc_(L, (b), (s), 0) +#define luaM_free(L, b) luaM_realloc_(L, (b), sizeof(*(b)), 0) +#define luaM_freearray(L, b, n, t) luaM_reallocv(L, (b), n, 0, sizeof(t)) + +#define luaM_malloc(L,t) luaM_realloc_(L, NULL, 0, (t)) +#define luaM_new(L,t) cast(t *, luaM_malloc(L, sizeof(t))) +#define luaM_newvector(L,n,t) \ + cast(t *, luaM_reallocv(L, NULL, 0, n, sizeof(t))) + +#define luaM_growvector(L,v,nelems,size,t,limit,e) \ + if ((nelems)+1 > (size)) \ + ((v)=cast(t *, luaM_growaux_(L,v,&(size),sizeof(t),limit,e))) + +#define luaM_reallocvector(L, v,oldn,n,t) \ + ((v)=cast(t *, luaM_reallocv(L, v, oldn, n, sizeof(t)))) + + +LUAI_FUNC void *luaM_realloc_ (lua_State *L, void *block, size_t oldsize, + size_t size); +LUAI_FUNC void *luaM_toobig (lua_State *L); +LUAI_FUNC void *luaM_growaux_ (lua_State *L, void *block, int *size, + size_t size_elem, int limit, + const char *errormsg); + +#endif + diff --git a/deps/lua/src/loadlib.c b/deps/lua/src/loadlib.c new file mode 100644 index 0000000..6158c53 --- /dev/null +++ b/deps/lua/src/loadlib.c @@ -0,0 +1,666 @@ +/* +** $Id: loadlib.c,v 1.52.1.4 2009/09/09 13:17:16 roberto Exp $ +** Dynamic library loader for Lua +** See Copyright Notice in lua.h +** +** This module contains an implementation of loadlib for Unix systems +** that have dlfcn, an implementation for Darwin (Mac OS X), an +** implementation for Windows, and a stub for other systems. 
+*/ + + +#include +#include + + +#define loadlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +/* prefix for open functions in C libraries */ +#define LUA_POF "luaopen_" + +/* separator for open functions in C libraries */ +#define LUA_OFSEP "_" + + +#define LIBPREFIX "LOADLIB: " + +#define POF LUA_POF +#define LIB_FAIL "open" + + +/* error codes for ll_loadfunc */ +#define ERRLIB 1 +#define ERRFUNC 2 + +#define setprogdir(L) ((void)0) + + +static void ll_unloadlib (void *lib); +static void *ll_load (lua_State *L, const char *path); +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym); + + + +#if defined(LUA_DL_DLOPEN) +/* +** {======================================================================== +** This is an implementation of loadlib based on the dlfcn interface. +** The dlfcn interface is available in Linux, SunOS, Solaris, IRIX, FreeBSD, +** NetBSD, AIX 4.2, HPUX 11, and probably most other Unix flavors, at least +** as an emulation layer on top of native functions. +** ========================================================================= +*/ + +#include + +static void ll_unloadlib (void *lib) { + dlclose(lib); +} + + +static void *ll_load (lua_State *L, const char *path) { + void *lib = dlopen(path, RTLD_NOW); + if (lib == NULL) lua_pushstring(L, dlerror()); + return lib; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + lua_CFunction f = (lua_CFunction)dlsym(lib, sym); + if (f == NULL) lua_pushstring(L, dlerror()); + return f; +} + +/* }====================================================== */ + + + +#elif defined(LUA_DL_DLL) +/* +** {====================================================================== +** This is an implementation of loadlib for Windows using native functions. 
+** ======================================================================= +*/ + +#include + + +#undef setprogdir + +static void setprogdir (lua_State *L) { + char buff[MAX_PATH + 1]; + char *lb; + DWORD nsize = sizeof(buff)/sizeof(char); + DWORD n = GetModuleFileNameA(NULL, buff, nsize); + if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) + luaL_error(L, "unable to get ModuleFileName"); + else { + *lb = '\0'; + luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff); + lua_remove(L, -2); /* remove original string */ + } +} + + +static void pusherror (lua_State *L) { + int error = GetLastError(); + char buffer[128]; + if (FormatMessageA(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, 0, buffer, sizeof(buffer), NULL)) + lua_pushstring(L, buffer); + else + lua_pushfstring(L, "system error %d\n", error); +} + +static void ll_unloadlib (void *lib) { + FreeLibrary((HINSTANCE)lib); +} + + +static void *ll_load (lua_State *L, const char *path) { + HINSTANCE lib = LoadLibraryA(path); + if (lib == NULL) pusherror(L); + return lib; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym); + if (f == NULL) pusherror(L); + return f; +} + +/* }====================================================== */ + + + +#elif defined(LUA_DL_DYLD) +/* +** {====================================================================== +** Native Mac OS X / Darwin Implementation +** ======================================================================= +*/ + +#include + + +/* Mac appends a `_' before C function names */ +#undef POF +#define POF "_" LUA_POF + + +static void pusherror (lua_State *L) { + const char *err_str; + const char *err_file; + NSLinkEditErrors err; + int err_num; + NSLinkEditError(&err, &err_num, &err_file, &err_str); + lua_pushstring(L, err_str); +} + + +static const char *errorfromcode (NSObjectFileImageReturnCode ret) { + switch (ret) { + case NSObjectFileImageInappropriateFile: + return "file is not a bundle"; + case NSObjectFileImageArch: + return "library is for wrong CPU type"; + case NSObjectFileImageFormat: + return "bad format"; + case NSObjectFileImageAccess: + return "cannot access file"; + case NSObjectFileImageFailure: + default: + return "unable to load library"; + } +} + + +static void ll_unloadlib (void *lib) { + NSUnLinkModule((NSModule)lib, NSUNLINKMODULE_OPTION_RESET_LAZY_REFERENCES); +} + + +static void *ll_load (lua_State *L, const char *path) { + NSObjectFileImage img; + NSObjectFileImageReturnCode ret; + /* this would be a rare case, but prevents crashing if it happens */ + if(!_dyld_present()) { + lua_pushliteral(L, "dyld not present"); + return NULL; + } + ret = NSCreateObjectFileImageFromFile(path, &img); + if (ret == NSObjectFileImageSuccess) { + NSModule mod = NSLinkModule(img, path, NSLINKMODULE_OPTION_PRIVATE | + NSLINKMODULE_OPTION_RETURN_ON_ERROR); + NSDestroyObjectFileImage(img); + if (mod == NULL) pusherror(L); + return mod; + } + lua_pushstring(L, errorfromcode(ret)); + return NULL; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + NSSymbol nss = NSLookupSymbolInModule((NSModule)lib, sym); + if (nss == NULL) { + lua_pushfstring(L, "symbol " LUA_QS " not found", sym); + return NULL; + } + return (lua_CFunction)NSAddressOfSymbol(nss); +} + +/* }====================================================== */ + + + +#else +/* +** {====================================================== +** Fallback for other systems 
+** ======================================================= +*/ + +#undef LIB_FAIL +#define LIB_FAIL "absent" + + +#define DLMSG "dynamic libraries not enabled; check your Lua installation" + + +static void ll_unloadlib (void *lib) { + (void)lib; /* to avoid warnings */ +} + + +static void *ll_load (lua_State *L, const char *path) { + (void)path; /* to avoid warnings */ + lua_pushliteral(L, DLMSG); + return NULL; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + (void)lib; (void)sym; /* to avoid warnings */ + lua_pushliteral(L, DLMSG); + return NULL; +} + +/* }====================================================== */ +#endif + + + +static void **ll_register (lua_State *L, const char *path) { + void **plib; + lua_pushfstring(L, "%s%s", LIBPREFIX, path); + lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */ + if (!lua_isnil(L, -1)) /* is there an entry? */ + plib = (void **)lua_touserdata(L, -1); + else { /* no entry yet; create one */ + lua_pop(L, 1); + plib = (void **)lua_newuserdata(L, sizeof(const void *)); + *plib = NULL; + luaL_getmetatable(L, "_LOADLIB"); + lua_setmetatable(L, -2); + lua_pushfstring(L, "%s%s", LIBPREFIX, path); + lua_pushvalue(L, -2); + lua_settable(L, LUA_REGISTRYINDEX); + } + return plib; +} + + +/* +** __gc tag method: calls library's `ll_unloadlib' function with the lib +** handle +*/ +static int gctm (lua_State *L) { + void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB"); + if (*lib) ll_unloadlib(*lib); + *lib = NULL; /* mark library as closed */ + return 0; +} + + +static int ll_loadfunc (lua_State *L, const char *path, const char *sym) { + void **reg = ll_register(L, path); + if (*reg == NULL) *reg = ll_load(L, path); + if (*reg == NULL) + return ERRLIB; /* unable to load library */ + else { + lua_CFunction f = ll_sym(L, *reg, sym); + if (f == NULL) + return ERRFUNC; /* unable to find function */ + lua_pushcfunction(L, f); + return 0; /* return function */ + } +} + + +static int ll_loadlib (lua_State *L) { + const char *path = luaL_checkstring(L, 1); + const char *init = luaL_checkstring(L, 2); + int stat = ll_loadfunc(L, path, init); + if (stat == 0) /* no errors? */ + return 1; /* return the loaded function */ + else { /* error; error message is on stack top */ + lua_pushnil(L); + lua_insert(L, -2); + lua_pushstring(L, (stat == ERRLIB) ? 
LIB_FAIL : "init"); + return 3; /* return nil, error message, and where */ + } +} + + + +/* +** {====================================================== +** 'require' function +** ======================================================= +*/ + + +static int readable (const char *filename) { + FILE *f = fopen(filename, "r"); /* try to open file */ + if (f == NULL) return 0; /* open failed */ + fclose(f); + return 1; +} + + +static const char *pushnexttemplate (lua_State *L, const char *path) { + const char *l; + while (*path == *LUA_PATHSEP) path++; /* skip separators */ + if (*path == '\0') return NULL; /* no more templates */ + l = strchr(path, *LUA_PATHSEP); /* find next separator */ + if (l == NULL) l = path + strlen(path); + lua_pushlstring(L, path, l - path); /* template */ + return l; +} + + +static const char *findfile (lua_State *L, const char *name, + const char *pname) { + const char *path; + name = luaL_gsub(L, name, ".", LUA_DIRSEP); + lua_getfield(L, LUA_ENVIRONINDEX, pname); + path = lua_tostring(L, -1); + if (path == NULL) + luaL_error(L, LUA_QL("package.%s") " must be a string", pname); + lua_pushliteral(L, ""); /* error accumulator */ + while ((path = pushnexttemplate(L, path)) != NULL) { + const char *filename; + filename = luaL_gsub(L, lua_tostring(L, -1), LUA_PATH_MARK, name); + lua_remove(L, -2); /* remove path template */ + if (readable(filename)) /* does file exist and is readable? */ + return filename; /* return that file name */ + lua_pushfstring(L, "\n\tno file " LUA_QS, filename); + lua_remove(L, -2); /* remove file name */ + lua_concat(L, 2); /* add entry to possible error message */ + } + return NULL; /* not found */ +} + + +static void loaderror (lua_State *L, const char *filename) { + luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s", + lua_tostring(L, 1), filename, lua_tostring(L, -1)); +} + + +static int loader_Lua (lua_State *L) { + const char *filename; + const char *name = luaL_checkstring(L, 1); + filename = findfile(L, name, "path"); + if (filename == NULL) return 1; /* library not found in this path */ + if (luaL_loadfile(L, filename) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + + +static const char *mkfuncname (lua_State *L, const char *modname) { + const char *funcname; + const char *mark = strchr(modname, *LUA_IGMARK); + if (mark) modname = mark + 1; + funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); + funcname = lua_pushfstring(L, POF"%s", funcname); + lua_remove(L, -2); /* remove 'gsub' result */ + return funcname; +} + + +static int loader_C (lua_State *L) { + const char *funcname; + const char *name = luaL_checkstring(L, 1); + const char *filename = findfile(L, name, "cpath"); + if (filename == NULL) return 1; /* library not found in this path */ + funcname = mkfuncname(L, name); + if (ll_loadfunc(L, filename, funcname) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + + +static int loader_Croot (lua_State *L) { + const char *funcname; + const char *filename; + const char *name = luaL_checkstring(L, 1); + const char *p = strchr(name, '.'); + int stat; + if (p == NULL) return 0; /* is root */ + lua_pushlstring(L, name, p - name); + filename = findfile(L, lua_tostring(L, -1), "cpath"); + if (filename == NULL) return 1; /* root not found */ + funcname = mkfuncname(L, name); + if ((stat = ll_loadfunc(L, filename, funcname)) != 0) { + if (stat != ERRFUNC) loaderror(L, filename); /* real error */ + lua_pushfstring(L, "\n\tno module " LUA_QS " in file " 
LUA_QS, + name, filename); + return 1; /* function not found */ + } + return 1; +} + + +static int loader_preload (lua_State *L) { + const char *name = luaL_checkstring(L, 1); + lua_getfield(L, LUA_ENVIRONINDEX, "preload"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.preload") " must be a table"); + lua_getfield(L, -1, name); + if (lua_isnil(L, -1)) /* not found? */ + lua_pushfstring(L, "\n\tno field package.preload['%s']", name); + return 1; +} + + +static const int sentinel_ = 0; +#define sentinel ((void *)&sentinel_) + + +static int ll_require (lua_State *L) { + const char *name = luaL_checkstring(L, 1); + int i; + lua_settop(L, 1); /* _LOADED table will be at index 2 */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, 2, name); + if (lua_toboolean(L, -1)) { /* is it there? */ + if (lua_touserdata(L, -1) == sentinel) /* check loops */ + luaL_error(L, "loop or previous error loading module " LUA_QS, name); + return 1; /* package is already loaded */ + } + /* else must load it; iterate over available loaders */ + lua_getfield(L, LUA_ENVIRONINDEX, "loaders"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.loaders") " must be a table"); + lua_pushliteral(L, ""); /* error message accumulator */ + for (i=1; ; i++) { + lua_rawgeti(L, -2, i); /* get a loader */ + if (lua_isnil(L, -1)) + luaL_error(L, "module " LUA_QS " not found:%s", + name, lua_tostring(L, -2)); + lua_pushstring(L, name); + lua_call(L, 1, 1); /* call it */ + if (lua_isfunction(L, -1)) /* did it find module? */ + break; /* module loaded successfully */ + else if (lua_isstring(L, -1)) /* loader returned error message? */ + lua_concat(L, 2); /* accumulate it */ + else + lua_pop(L, 1); + } + lua_pushlightuserdata(L, sentinel); + lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */ + lua_pushstring(L, name); /* pass name as argument to module */ + lua_call(L, 1, 1); /* run loaded module */ + if (!lua_isnil(L, -1)) /* non-nil return? */ + lua_setfield(L, 2, name); /* _LOADED[name] = returned value */ + lua_getfield(L, 2, name); + if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? 
*/ + lua_pushboolean(L, 1); /* use true as result */ + lua_pushvalue(L, -1); /* extra copy to be returned */ + lua_setfield(L, 2, name); /* _LOADED[name] = true */ + } + return 1; +} + +/* }====================================================== */ + + + +/* +** {====================================================== +** 'module' function +** ======================================================= +*/ + + +static void setfenv (lua_State *L) { + lua_Debug ar; + if (lua_getstack(L, 1, &ar) == 0 || + lua_getinfo(L, "f", &ar) == 0 || /* get calling function */ + lua_iscfunction(L, -1)) + luaL_error(L, LUA_QL("module") " not called from a Lua function"); + lua_pushvalue(L, -2); + lua_setfenv(L, -2); + lua_pop(L, 1); +} + + +static void dooptions (lua_State *L, int n) { + int i; + for (i = 2; i <= n; i++) { + lua_pushvalue(L, i); /* get option (a function) */ + lua_pushvalue(L, -2); /* module */ + lua_call(L, 1, 0); + } +} + + +static void modinit (lua_State *L, const char *modname) { + const char *dot; + lua_pushvalue(L, -1); + lua_setfield(L, -2, "_M"); /* module._M = module */ + lua_pushstring(L, modname); + lua_setfield(L, -2, "_NAME"); + dot = strrchr(modname, '.'); /* look for last dot in module name */ + if (dot == NULL) dot = modname; + else dot++; + /* set _PACKAGE as package name (full module name minus last part) */ + lua_pushlstring(L, modname, dot - modname); + lua_setfield(L, -2, "_PACKAGE"); +} + + +static int ll_module (lua_State *L) { + const char *modname = luaL_checkstring(L, 1); + int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, loaded, modname); /* get _LOADED[modname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL) + return luaL_error(L, "name conflict for module " LUA_QS, modname); + lua_pushvalue(L, -1); + lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */ + } + /* check whether table already has a _NAME field */ + lua_getfield(L, -1, "_NAME"); + if (!lua_isnil(L, -1)) /* is table an initialized module? */ + lua_pop(L, 1); + else { /* no; initialize it */ + lua_pop(L, 1); + modinit(L, modname); + } + lua_pushvalue(L, -1); + setfenv(L); + dooptions(L, loaded - 1); + return 0; +} + + +static int ll_seeall (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + if (!lua_getmetatable(L, 1)) { + lua_createtable(L, 0, 1); /* create new metatable */ + lua_pushvalue(L, -1); + lua_setmetatable(L, 1); + } + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setfield(L, -2, "__index"); /* mt.__index = _G */ + return 0; +} + + +/* }====================================================== */ + + + +/* auxiliary mark (for internal use) */ +#define AUXMARK "\1" + +static void setpath (lua_State *L, const char *fieldname, const char *envname, + const char *def) { + const char *path = getenv(envname); + if (path == NULL) /* no environment variable? 
*/ + lua_pushstring(L, def); /* use default */ + else { + /* replace ";;" by ";AUXMARK;" and then AUXMARK by default path */ + path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP, + LUA_PATHSEP AUXMARK LUA_PATHSEP); + luaL_gsub(L, path, AUXMARK, def); + lua_remove(L, -2); + } + setprogdir(L); + lua_setfield(L, -2, fieldname); +} + + +static const luaL_Reg pk_funcs[] = { + {"loadlib", ll_loadlib}, + {"seeall", ll_seeall}, + {NULL, NULL} +}; + + +static const luaL_Reg ll_funcs[] = { + {"module", ll_module}, + {"require", ll_require}, + {NULL, NULL} +}; + + +static const lua_CFunction loaders[] = + {loader_preload, loader_Lua, loader_C, loader_Croot, NULL}; + + +LUALIB_API int luaopen_package (lua_State *L) { + int i; + /* create new type _LOADLIB */ + luaL_newmetatable(L, "_LOADLIB"); + lua_pushcfunction(L, gctm); + lua_setfield(L, -2, "__gc"); + /* create `package' table */ + luaL_register(L, LUA_LOADLIBNAME, pk_funcs); +#if defined(LUA_COMPAT_LOADLIB) + lua_getfield(L, -1, "loadlib"); + lua_setfield(L, LUA_GLOBALSINDEX, "loadlib"); +#endif + lua_pushvalue(L, -1); + lua_replace(L, LUA_ENVIRONINDEX); + /* create `loaders' table */ + lua_createtable(L, sizeof(loaders)/sizeof(loaders[0]) - 1, 0); + /* fill it with pre-defined loaders */ + for (i=0; loaders[i] != NULL; i++) { + lua_pushcfunction(L, loaders[i]); + lua_rawseti(L, -2, i+1); + } + lua_setfield(L, -2, "loaders"); /* put it in field `loaders' */ + setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT); /* set field `path' */ + setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT); /* set field `cpath' */ + /* store config information */ + lua_pushliteral(L, LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" + LUA_EXECDIR "\n" LUA_IGMARK); + lua_setfield(L, -2, "config"); + /* set field `loaded' */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 2); + lua_setfield(L, -2, "loaded"); + /* set field `preload' */ + lua_newtable(L); + lua_setfield(L, -2, "preload"); + lua_pushvalue(L, LUA_GLOBALSINDEX); + luaL_register(L, NULL, ll_funcs); /* open lib into global table */ + lua_pop(L, 1); + return 1; /* return 'package' table */ +} + diff --git a/deps/lua/src/lobject.c b/deps/lua/src/lobject.c new file mode 100644 index 0000000..4ff5073 --- /dev/null +++ b/deps/lua/src/lobject.c @@ -0,0 +1,214 @@ +/* +** $Id: lobject.c,v 2.22.1.1 2007/12/27 13:02:25 roberto Exp $ +** Some generic functions over Lua objects +** See Copyright Notice in lua.h +*/ + +#include +#include +#include +#include +#include + +#define lobject_c +#define LUA_CORE + +#include "lua.h" + +#include "ldo.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "lvm.h" + + + +const TValue luaO_nilobject_ = {{NULL}, LUA_TNIL}; + + +/* +** converts an integer to a "floating point byte", represented as +** (eeeeexxx), where the real value is (1xxx) * 2^(eeeee - 1) if +** eeeee != 0 and (xxx) otherwise. 
+*/ +int luaO_int2fb (unsigned int x) { + int e = 0; /* expoent */ + while (x >= 16) { + x = (x+1) >> 1; + e++; + } + if (x < 8) return x; + else return ((e+1) << 3) | (cast_int(x) - 8); +} + + +/* converts back */ +int luaO_fb2int (int x) { + int e = (x >> 3) & 31; + if (e == 0) return x; + else return ((x & 7)+8) << (e - 1); +} + + +int luaO_log2 (unsigned int x) { + static const lu_byte log_2[256] = { + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 + }; + int l = -1; + while (x >= 256) { l += 8; x >>= 8; } + return l + log_2[x]; + +} + + +int luaO_rawequalObj (const TValue *t1, const TValue *t2) { + if (ttype(t1) != ttype(t2)) return 0; + else switch (ttype(t1)) { + case LUA_TNIL: + return 1; + case LUA_TNUMBER: + return luai_numeq(nvalue(t1), nvalue(t2)); + case LUA_TBOOLEAN: + return bvalue(t1) == bvalue(t2); /* boolean true must be 1 !! */ + case LUA_TLIGHTUSERDATA: + return pvalue(t1) == pvalue(t2); + default: + lua_assert(iscollectable(t1)); + return gcvalue(t1) == gcvalue(t2); + } +} + + +int luaO_str2d (const char *s, lua_Number *result) { + char *endptr; + *result = lua_str2number(s, &endptr); + if (endptr == s) return 0; /* conversion failed */ + if (*endptr == 'x' || *endptr == 'X') /* maybe an hexadecimal constant? */ + *result = cast_num(strtoul(s, &endptr, 16)); + if (*endptr == '\0') return 1; /* most common case */ + while (isspace(cast(unsigned char, *endptr))) endptr++; + if (*endptr != '\0') return 0; /* invalid trailing characters? */ + return 1; +} + + + +static void pushstr (lua_State *L, const char *str) { + setsvalue2s(L, L->top, luaS_new(L, str)); + incr_top(L); +} + + +/* this function handles only `%d', `%c', %f, %p, and `%s' formats */ +const char *luaO_pushvfstring (lua_State *L, const char *fmt, va_list argp) { + int n = 1; + pushstr(L, ""); + for (;;) { + const char *e = strchr(fmt, '%'); + if (e == NULL) break; + setsvalue2s(L, L->top, luaS_newlstr(L, fmt, e-fmt)); + incr_top(L); + switch (*(e+1)) { + case 's': { + const char *s = va_arg(argp, char *); + if (s == NULL) s = "(null)"; + pushstr(L, s); + break; + } + case 'c': { + char buff[2]; + buff[0] = cast(char, va_arg(argp, int)); + buff[1] = '\0'; + pushstr(L, buff); + break; + } + case 'd': { + setnvalue(L->top, cast_num(va_arg(argp, int))); + incr_top(L); + break; + } + case 'f': { + setnvalue(L->top, cast_num(va_arg(argp, l_uacNumber))); + incr_top(L); + break; + } + case 'p': { + char buff[4*sizeof(void *) + 8]; /* should be enough space for a `%p' */ + sprintf(buff, "%p", va_arg(argp, void *)); + pushstr(L, buff); + break; + } + case '%': { + pushstr(L, "%"); + break; + } + default: { + char buff[3]; + buff[0] = '%'; + buff[1] = *(e+1); + buff[2] = '\0'; + pushstr(L, buff); + break; + } + } + n += 2; + fmt = e+2; + } + pushstr(L, fmt); + luaV_concat(L, n+1, cast_int(L->top - L->base) - 1); + L->top -= n; + return svalue(L->top - 1); +} + + +const char *luaO_pushfstring (lua_State *L, const char *fmt, ...) 
{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = luaO_pushvfstring(L, fmt, argp); + va_end(argp); + return msg; +} + + +void luaO_chunkid (char *out, const char *source, size_t bufflen) { + if (*source == '=') { + strncpy(out, source+1, bufflen); /* remove first char */ + out[bufflen-1] = '\0'; /* ensures null termination */ + } + else { /* out = "source", or "...source" */ + if (*source == '@') { + size_t l; + source++; /* skip the `@' */ + bufflen -= sizeof(" '...' "); + l = strlen(source); + strcpy(out, ""); + if (l > bufflen) { + source += (l-bufflen); /* get last part of file name */ + strcat(out, "..."); + } + strcat(out, source); + } + else { /* out = [string "string"] */ + size_t len = strcspn(source, "\n\r"); /* stop at first newline */ + bufflen -= sizeof(" [string \"...\"] "); + if (len > bufflen) len = bufflen; + strcpy(out, "[string \""); + if (source[len] != '\0') { /* must truncate? */ + strncat(out, source, len); + strcat(out, "..."); + } + else + strcat(out, source); + strcat(out, "\"]"); + } + } +} diff --git a/deps/lua/src/lobject.h b/deps/lua/src/lobject.h new file mode 100644 index 0000000..10e46b1 --- /dev/null +++ b/deps/lua/src/lobject.h @@ -0,0 +1,382 @@ +/* +** $Id: lobject.h,v 2.20.1.2 2008/08/06 13:29:48 roberto Exp $ +** Type definitions for Lua objects +** See Copyright Notice in lua.h +*/ + + +#ifndef lobject_h +#define lobject_h + + +#include + + +#include "llimits.h" +#include "lua.h" + + +/* tags for values visible from Lua */ +#define LAST_TAG LUA_TTHREAD + +#define NUM_TAGS (LAST_TAG+1) + + +/* +** Extra tags for non-values +*/ +#define LUA_TPROTO (LAST_TAG+1) +#define LUA_TUPVAL (LAST_TAG+2) +#define LUA_TDEADKEY (LAST_TAG+3) + + +/* +** Union of all collectable objects +*/ +typedef union GCObject GCObject; + + +/* +** Common Header for all collectable objects (in macro form, to be +** included in other objects) +*/ +#define CommonHeader GCObject *next; lu_byte tt; lu_byte marked + + +/* +** Common header in struct form +*/ +typedef struct GCheader { + CommonHeader; +} GCheader; + + + + +/* +** Union of all Lua values +*/ +typedef union { + GCObject *gc; + void *p; + lua_Number n; + int b; +} Value; + + +/* +** Tagged Values +*/ + +#define TValuefields Value value; int tt + +typedef struct lua_TValue { + TValuefields; +} TValue; + + +/* Macros to test type */ +#define ttisnil(o) (ttype(o) == LUA_TNIL) +#define ttisnumber(o) (ttype(o) == LUA_TNUMBER) +#define ttisstring(o) (ttype(o) == LUA_TSTRING) +#define ttistable(o) (ttype(o) == LUA_TTABLE) +#define ttisfunction(o) (ttype(o) == LUA_TFUNCTION) +#define ttisboolean(o) (ttype(o) == LUA_TBOOLEAN) +#define ttisuserdata(o) (ttype(o) == LUA_TUSERDATA) +#define ttisthread(o) (ttype(o) == LUA_TTHREAD) +#define ttislightuserdata(o) (ttype(o) == LUA_TLIGHTUSERDATA) + +/* Macros to access values */ +#define ttype(o) ((o)->tt) +#define gcvalue(o) check_exp(iscollectable(o), (o)->value.gc) +#define pvalue(o) check_exp(ttislightuserdata(o), (o)->value.p) +#define nvalue(o) check_exp(ttisnumber(o), (o)->value.n) +#define rawtsvalue(o) check_exp(ttisstring(o), &(o)->value.gc->ts) +#define tsvalue(o) (&rawtsvalue(o)->tsv) +#define rawuvalue(o) check_exp(ttisuserdata(o), &(o)->value.gc->u) +#define uvalue(o) (&rawuvalue(o)->uv) +#define clvalue(o) check_exp(ttisfunction(o), &(o)->value.gc->cl) +#define hvalue(o) check_exp(ttistable(o), &(o)->value.gc->h) +#define bvalue(o) check_exp(ttisboolean(o), (o)->value.b) +#define thvalue(o) check_exp(ttisthread(o), &(o)->value.gc->th) + +#define l_isfalse(o) 
(ttisnil(o) || (ttisboolean(o) && bvalue(o) == 0)) + +/* +** for internal debug only +*/ +#define checkconsistency(obj) \ + lua_assert(!iscollectable(obj) || (ttype(obj) == (obj)->value.gc->gch.tt)) + +#define checkliveness(g,obj) \ + lua_assert(!iscollectable(obj) || \ + ((ttype(obj) == (obj)->value.gc->gch.tt) && !isdead(g, (obj)->value.gc))) + + +/* Macros to set values */ +#define setnilvalue(obj) ((obj)->tt=LUA_TNIL) + +#define setnvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.n=(x); i_o->tt=LUA_TNUMBER; } + +#define setpvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.p=(x); i_o->tt=LUA_TLIGHTUSERDATA; } + +#define setbvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.b=(x); i_o->tt=LUA_TBOOLEAN; } + +#define setsvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TSTRING; \ + checkliveness(G(L),i_o); } + +#define setuvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TUSERDATA; \ + checkliveness(G(L),i_o); } + +#define setthvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTHREAD; \ + checkliveness(G(L),i_o); } + +#define setclvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TFUNCTION; \ + checkliveness(G(L),i_o); } + +#define sethvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTABLE; \ + checkliveness(G(L),i_o); } + +#define setptvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TPROTO; \ + checkliveness(G(L),i_o); } + + + + +#define setobj(L,obj1,obj2) \ + { const TValue *o2=(obj2); TValue *o1=(obj1); \ + o1->value = o2->value; o1->tt=o2->tt; \ + checkliveness(G(L),o1); } + + +/* +** different types of sets, according to destination +*/ + +/* from stack to (same) stack */ +#define setobjs2s setobj +/* to stack (not from same stack) */ +#define setobj2s setobj +#define setsvalue2s setsvalue +#define sethvalue2s sethvalue +#define setptvalue2s setptvalue +/* from table to same table */ +#define setobjt2t setobj +/* to table */ +#define setobj2t setobj +/* to new object */ +#define setobj2n setobj +#define setsvalue2n setsvalue + +#define setttype(obj, tt) (ttype(obj) = (tt)) + + +#define iscollectable(o) (ttype(o) >= LUA_TSTRING) + + + +typedef TValue *StkId; /* index to stack elements */ + + +/* +** String headers for string table +*/ +typedef union TString { + L_Umaxalign dummy; /* ensures maximum alignment for strings */ + struct { + CommonHeader; + lu_byte reserved; + unsigned int hash; + size_t len; + } tsv; +} TString; + + +#define getstr(ts) cast(const char *, (ts) + 1) +#define svalue(o) getstr(rawtsvalue(o)) + + + +typedef union Udata { + L_Umaxalign dummy; /* ensures maximum alignment for `local' udata */ + struct { + CommonHeader; + struct Table *metatable; + struct Table *env; + size_t len; + } uv; +} Udata; + + + + +/* +** Function Prototypes +*/ +typedef struct Proto { + CommonHeader; + TValue *k; /* constants used by the function */ + Instruction *code; + struct Proto **p; /* functions defined inside the function */ + int *lineinfo; /* map from opcodes to source lines */ + struct LocVar *locvars; /* information about local variables */ + TString **upvalues; /* upvalue names */ + TString *source; + int sizeupvalues; + int sizek; /* size of `k' */ + int sizecode; + int sizelineinfo; + int sizep; /* size of `p' */ + int sizelocvars; + int linedefined; + int lastlinedefined; + GCObject *gclist; + lu_byte nups; /* 
number of upvalues */ + lu_byte numparams; + lu_byte is_vararg; + lu_byte maxstacksize; +} Proto; + + +/* masks for new-style vararg */ +#define VARARG_HASARG 1 +#define VARARG_ISVARARG 2 +#define VARARG_NEEDSARG 4 + + +typedef struct LocVar { + TString *varname; + int startpc; /* first point where variable is active */ + int endpc; /* first point where variable is dead */ +} LocVar; + + + +/* +** Upvalues +*/ + +typedef struct UpVal { + CommonHeader; + TValue *v; /* points to stack or to its own value */ + union { + TValue value; /* the value (when closed) */ + struct { /* double linked list (when open) */ + struct UpVal *prev; + struct UpVal *next; + } l; + } u; +} UpVal; + + +/* +** Closures +*/ + +#define ClosureHeader \ + CommonHeader; lu_byte isC; lu_byte nupvalues; GCObject *gclist; \ + struct Table *env + +typedef struct CClosure { + ClosureHeader; + lua_CFunction f; + TValue upvalue[1]; +} CClosure; + + +typedef struct LClosure { + ClosureHeader; + struct Proto *p; + UpVal *upvals[1]; +} LClosure; + + +typedef union Closure { + CClosure c; + LClosure l; +} Closure; + + +#define iscfunction(o) (ttype(o) == LUA_TFUNCTION && clvalue(o)->c.isC) +#define isLfunction(o) (ttype(o) == LUA_TFUNCTION && !clvalue(o)->c.isC) + + +/* +** Tables +*/ + +typedef union TKey { + struct { + TValuefields; + struct Node *next; /* for chaining */ + } nk; + TValue tvk; +} TKey; + + +typedef struct Node { + TValue i_val; + TKey i_key; +} Node; + + +typedef struct Table { + CommonHeader; + lu_byte flags; /* 1<
<p means tagmethod(p) is not present */
+  lu_byte lsizenode;  /* log2 of size of `node' array */
+  struct Table *metatable;
+  TValue *array;  /* array part */
+  Node *node;
+  Node *lastfree;  /* any free position is before this position */
+  GCObject *gclist;
+  int sizearray;  /* size of `array' array */
+} Table;
+
+
+
+/*
+** `module' operation for hashing (size is always a power of 2)
+*/
+#define lmod(s,size) \
+	(check_exp((size&(size-1))==0, (cast(int, (s) & ((size)-1)))))
+
+
+#define twoto(x)	(1<<(x))
+#define sizenode(t)	(twoto((t)->
lsizenode)) + + +#define luaO_nilobject (&luaO_nilobject_) + +LUAI_DATA const TValue luaO_nilobject_; + +#define ceillog2(x) (luaO_log2((x)-1) + 1) + +LUAI_FUNC int luaO_log2 (unsigned int x); +LUAI_FUNC int luaO_int2fb (unsigned int x); +LUAI_FUNC int luaO_fb2int (int x); +LUAI_FUNC int luaO_rawequalObj (const TValue *t1, const TValue *t2); +LUAI_FUNC int luaO_str2d (const char *s, lua_Number *result); +LUAI_FUNC const char *luaO_pushvfstring (lua_State *L, const char *fmt, + va_list argp); +LUAI_FUNC const char *luaO_pushfstring (lua_State *L, const char *fmt, ...); +LUAI_FUNC void luaO_chunkid (char *out, const char *source, size_t len); + + +#endif + diff --git a/deps/lua/src/lopcodes.c b/deps/lua/src/lopcodes.c new file mode 100644 index 0000000..4cc7452 --- /dev/null +++ b/deps/lua/src/lopcodes.c @@ -0,0 +1,102 @@ +/* +** $Id: lopcodes.c,v 1.37.1.1 2007/12/27 13:02:25 roberto Exp $ +** See Copyright Notice in lua.h +*/ + + +#define lopcodes_c +#define LUA_CORE + + +#include "lopcodes.h" + + +/* ORDER OP */ + +const char *const luaP_opnames[NUM_OPCODES+1] = { + "MOVE", + "LOADK", + "LOADBOOL", + "LOADNIL", + "GETUPVAL", + "GETGLOBAL", + "GETTABLE", + "SETGLOBAL", + "SETUPVAL", + "SETTABLE", + "NEWTABLE", + "SELF", + "ADD", + "SUB", + "MUL", + "DIV", + "MOD", + "POW", + "UNM", + "NOT", + "LEN", + "CONCAT", + "JMP", + "EQ", + "LT", + "LE", + "TEST", + "TESTSET", + "CALL", + "TAILCALL", + "RETURN", + "FORLOOP", + "FORPREP", + "TFORLOOP", + "SETLIST", + "CLOSE", + "CLOSURE", + "VARARG", + NULL +}; + + +#define opmode(t,a,b,c,m) (((t)<<7) | ((a)<<6) | ((b)<<4) | ((c)<<2) | (m)) + +const lu_byte luaP_opmodes[NUM_OPCODES] = { +/* T A B C mode opcode */ + opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_MOVE */ + ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_LOADK */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_LOADBOOL */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LOADNIL */ + ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_GETUPVAL */ + ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_GETGLOBAL */ + ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_GETTABLE */ + ,opmode(0, 0, OpArgK, OpArgN, iABx) /* OP_SETGLOBAL */ + ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_SETUPVAL */ + ,opmode(0, 0, OpArgK, OpArgK, iABC) /* OP_SETTABLE */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_NEWTABLE */ + ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_SELF */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_ADD */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_SUB */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MUL */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_DIV */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MOD */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_POW */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_UNM */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_NOT */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LEN */ + ,opmode(0, 1, OpArgR, OpArgR, iABC) /* OP_CONCAT */ + ,opmode(0, 0, OpArgR, OpArgN, iAsBx) /* OP_JMP */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_EQ */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LT */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LE */ + ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TEST */ + ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TESTSET */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_CALL */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_TAILCALL */ + ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_RETURN */ + ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORLOOP */ + ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORPREP */ + ,opmode(1, 0, OpArgN, OpArgU, iABC) /* OP_TFORLOOP */ + ,opmode(0, 0, OpArgU, OpArgU, iABC) /* OP_SETLIST */ + 
,opmode(0, 0, OpArgN, OpArgN, iABC) /* OP_CLOSE */ + ,opmode(0, 1, OpArgU, OpArgN, iABx) /* OP_CLOSURE */ + ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_VARARG */ +}; + diff --git a/deps/lua/src/lopcodes.h b/deps/lua/src/lopcodes.h new file mode 100644 index 0000000..41224d6 --- /dev/null +++ b/deps/lua/src/lopcodes.h @@ -0,0 +1,268 @@ +/* +** $Id: lopcodes.h,v 1.125.1.1 2007/12/27 13:02:25 roberto Exp $ +** Opcodes for Lua virtual machine +** See Copyright Notice in lua.h +*/ + +#ifndef lopcodes_h +#define lopcodes_h + +#include "llimits.h" + + +/*=========================================================================== + We assume that instructions are unsigned numbers. + All instructions have an opcode in the first 6 bits. + Instructions can have the following fields: + `A' : 8 bits + `B' : 9 bits + `C' : 9 bits + `Bx' : 18 bits (`B' and `C' together) + `sBx' : signed Bx + + A signed argument is represented in excess K; that is, the number + value is the unsigned value minus K. K is exactly the maximum value + for that argument (so that -max is represented by 0, and +max is + represented by 2*max), which is half the maximum for the corresponding + unsigned argument. +===========================================================================*/ + + +enum OpMode {iABC, iABx, iAsBx}; /* basic instruction format */ + + +/* +** size and position of opcode arguments. +*/ +#define SIZE_C 9 +#define SIZE_B 9 +#define SIZE_Bx (SIZE_C + SIZE_B) +#define SIZE_A 8 + +#define SIZE_OP 6 + +#define POS_OP 0 +#define POS_A (POS_OP + SIZE_OP) +#define POS_C (POS_A + SIZE_A) +#define POS_B (POS_C + SIZE_C) +#define POS_Bx POS_C + + +/* +** limits for opcode arguments. +** we use (signed) int to manipulate most arguments, +** so they must fit in LUAI_BITSINT-1 bits (-1 for sign) +*/ +#if SIZE_Bx < LUAI_BITSINT-1 +#define MAXARG_Bx ((1<>1) /* `sBx' is signed */ +#else +#define MAXARG_Bx MAX_INT +#define MAXARG_sBx MAX_INT +#endif + + +#define MAXARG_A ((1<>POS_OP) & MASK1(SIZE_OP,0))) +#define SET_OPCODE(i,o) ((i) = (((i)&MASK0(SIZE_OP,POS_OP)) | \ + ((cast(Instruction, o)<>POS_A) & MASK1(SIZE_A,0))) +#define SETARG_A(i,u) ((i) = (((i)&MASK0(SIZE_A,POS_A)) | \ + ((cast(Instruction, u)<>POS_B) & MASK1(SIZE_B,0))) +#define SETARG_B(i,b) ((i) = (((i)&MASK0(SIZE_B,POS_B)) | \ + ((cast(Instruction, b)<>POS_C) & MASK1(SIZE_C,0))) +#define SETARG_C(i,b) ((i) = (((i)&MASK0(SIZE_C,POS_C)) | \ + ((cast(Instruction, b)<>POS_Bx) & MASK1(SIZE_Bx,0))) +#define SETARG_Bx(i,b) ((i) = (((i)&MASK0(SIZE_Bx,POS_Bx)) | \ + ((cast(Instruction, b)< C) then pc++ */ +OP_TESTSET,/* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */ + +OP_CALL,/* A B C R(A), ... ,R(A+C-2) := R(A)(R(A+1), ... ,R(A+B-1)) */ +OP_TAILCALL,/* A B C return R(A)(R(A+1), ... ,R(A+B-1)) */ +OP_RETURN,/* A B return R(A), ... ,R(A+B-2) (see note) */ + +OP_FORLOOP,/* A sBx R(A)+=R(A+2); + if R(A) =) R(A)*/ +OP_CLOSURE,/* A Bx R(A) := closure(KPROTO[Bx], R(A), ... ,R(A+n)) */ + +OP_VARARG/* A B R(A), R(A+1), ..., R(A+B-1) = vararg */ +} OpCode; + + +#define NUM_OPCODES (cast(int, OP_VARARG) + 1) + + + +/*=========================================================================== + Notes: + (*) In OP_CALL, if (B == 0) then B = top. C is the number of returns - 1, + and can be 0: OP_CALL then sets `top' to last_result+1, so + next open instruction (OP_CALL, OP_RETURN, OP_SETLIST) may use `top'. + + (*) In OP_VARARG, if (B == 0) then use actual number of varargs and + set top (like in OP_CALL with C == 0). 
+ + (*) In OP_RETURN, if (B == 0) then return up to `top' + + (*) In OP_SETLIST, if (B == 0) then B = `top'; + if (C == 0) then next `instruction' is real C + + (*) For comparisons, A specifies what condition the test should accept + (true or false). + + (*) All `skips' (pc++) assume that next instruction is a jump +===========================================================================*/ + + +/* +** masks for instruction properties. The format is: +** bits 0-1: op mode +** bits 2-3: C arg mode +** bits 4-5: B arg mode +** bit 6: instruction set register A +** bit 7: operator is a test +*/ + +enum OpArgMask { + OpArgN, /* argument is not used */ + OpArgU, /* argument is used */ + OpArgR, /* argument is a register or a jump offset */ + OpArgK /* argument is a constant or register/constant */ +}; + +LUAI_DATA const lu_byte luaP_opmodes[NUM_OPCODES]; + +#define getOpMode(m) (cast(enum OpMode, luaP_opmodes[m] & 3)) +#define getBMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 4) & 3)) +#define getCMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 2) & 3)) +#define testAMode(m) (luaP_opmodes[m] & (1 << 6)) +#define testTMode(m) (luaP_opmodes[m] & (1 << 7)) + + +LUAI_DATA const char *const luaP_opnames[NUM_OPCODES+1]; /* opcode names */ + + +/* number of list items to accumulate before a SETLIST instruction */ +#define LFIELDS_PER_FLUSH 50 + + +#endif diff --git a/deps/lua/src/loslib.c b/deps/lua/src/loslib.c new file mode 100644 index 0000000..da06a57 --- /dev/null +++ b/deps/lua/src/loslib.c @@ -0,0 +1,243 @@ +/* +** $Id: loslib.c,v 1.19.1.3 2008/01/18 16:38:18 roberto Exp $ +** Standard Operating System library +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include +#include + +#define loslib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +static int os_pushresult (lua_State *L, int i, const char *filename) { + int en = errno; /* calls to Lua API may change this value */ + if (i) { + lua_pushboolean(L, 1); + return 1; + } + else { + lua_pushnil(L); + lua_pushfstring(L, "%s: %s", filename, strerror(en)); + lua_pushinteger(L, en); + return 3; + } +} + + +static int os_execute (lua_State *L) { + lua_pushinteger(L, system(luaL_optstring(L, 1, NULL))); + return 1; +} + + +static int os_remove (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + return os_pushresult(L, remove(filename) == 0, filename); +} + + +static int os_rename (lua_State *L) { + const char *fromname = luaL_checkstring(L, 1); + const char *toname = luaL_checkstring(L, 2); + return os_pushresult(L, rename(fromname, toname) == 0, fromname); +} + + +static int os_tmpname (lua_State *L) { + char buff[LUA_TMPNAMBUFSIZE]; + int err; + lua_tmpnam(buff, err); + if (err) + return luaL_error(L, "unable to generate a unique filename"); + lua_pushstring(L, buff); + return 1; +} + + +static int os_getenv (lua_State *L) { + lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */ + return 1; +} + + +static int os_clock (lua_State *L) { + lua_pushnumber(L, ((lua_Number)clock())/(lua_Number)CLOCKS_PER_SEC); + return 1; +} + + +/* +** {====================================================== +** Time/Date operations +** { year=%Y, month=%m, day=%d, hour=%H, min=%M, sec=%S, +** wday=%w+1, yday=%j, isdst=? 
} +** ======================================================= +*/ + +static void setfield (lua_State *L, const char *key, int value) { + lua_pushinteger(L, value); + lua_setfield(L, -2, key); +} + +static void setboolfield (lua_State *L, const char *key, int value) { + if (value < 0) /* undefined? */ + return; /* does not set field */ + lua_pushboolean(L, value); + lua_setfield(L, -2, key); +} + +static int getboolfield (lua_State *L, const char *key) { + int res; + lua_getfield(L, -1, key); + res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1); + lua_pop(L, 1); + return res; +} + + +static int getfield (lua_State *L, const char *key, int d) { + int res; + lua_getfield(L, -1, key); + if (lua_isnumber(L, -1)) + res = (int)lua_tointeger(L, -1); + else { + if (d < 0) + return luaL_error(L, "field " LUA_QS " missing in date table", key); + res = d; + } + lua_pop(L, 1); + return res; +} + + +static int os_date (lua_State *L) { + const char *s = luaL_optstring(L, 1, "%c"); + time_t t = luaL_opt(L, (time_t)luaL_checknumber, 2, time(NULL)); + struct tm *stm; + if (*s == '!') { /* UTC? */ + stm = gmtime(&t); + s++; /* skip `!' */ + } + else + stm = localtime(&t); + if (stm == NULL) /* invalid date? */ + lua_pushnil(L); + else if (strcmp(s, "*t") == 0) { + lua_createtable(L, 0, 9); /* 9 = number of fields */ + setfield(L, "sec", stm->tm_sec); + setfield(L, "min", stm->tm_min); + setfield(L, "hour", stm->tm_hour); + setfield(L, "day", stm->tm_mday); + setfield(L, "month", stm->tm_mon+1); + setfield(L, "year", stm->tm_year+1900); + setfield(L, "wday", stm->tm_wday+1); + setfield(L, "yday", stm->tm_yday+1); + setboolfield(L, "isdst", stm->tm_isdst); + } + else { + char cc[3]; + luaL_Buffer b; + cc[0] = '%'; cc[2] = '\0'; + luaL_buffinit(L, &b); + for (; *s; s++) { + if (*s != '%' || *(s + 1) == '\0') /* no conversion specifier? */ + luaL_addchar(&b, *s); + else { + size_t reslen; + char buff[200]; /* should be big enough for any conversion result */ + cc[1] = *(++s); + reslen = strftime(buff, sizeof(buff), cc, stm); + luaL_addlstring(&b, buff, reslen); + } + } + luaL_pushresult(&b); + } + return 1; +} + + +static int os_time (lua_State *L) { + time_t t; + if (lua_isnoneornil(L, 1)) /* called without args? 
*/ + t = time(NULL); /* get current time */ + else { + struct tm ts; + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 1); /* make sure table is at the top */ + ts.tm_sec = getfield(L, "sec", 0); + ts.tm_min = getfield(L, "min", 0); + ts.tm_hour = getfield(L, "hour", 12); + ts.tm_mday = getfield(L, "day", -1); + ts.tm_mon = getfield(L, "month", -1) - 1; + ts.tm_year = getfield(L, "year", -1) - 1900; + ts.tm_isdst = getboolfield(L, "isdst"); + t = mktime(&ts); + } + if (t == (time_t)(-1)) + lua_pushnil(L); + else + lua_pushnumber(L, (lua_Number)t); + return 1; +} + + +static int os_difftime (lua_State *L) { + lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), + (time_t)(luaL_optnumber(L, 2, 0)))); + return 1; +} + +/* }====================================================== */ + + +static int os_setlocale (lua_State *L) { + static const int cat[] = {LC_ALL, LC_COLLATE, LC_CTYPE, LC_MONETARY, + LC_NUMERIC, LC_TIME}; + static const char *const catnames[] = {"all", "collate", "ctype", "monetary", + "numeric", "time", NULL}; + const char *l = luaL_optstring(L, 1, NULL); + int op = luaL_checkoption(L, 2, "all", catnames); + lua_pushstring(L, setlocale(cat[op], l)); + return 1; +} + + +static int os_exit (lua_State *L) { + exit(luaL_optint(L, 1, EXIT_SUCCESS)); +} + +static const luaL_Reg syslib[] = { + {"clock", os_clock}, + {"date", os_date}, + {"difftime", os_difftime}, + {"execute", os_execute}, + {"exit", os_exit}, + {"getenv", os_getenv}, + {"remove", os_remove}, + {"rename", os_rename}, + {"setlocale", os_setlocale}, + {"time", os_time}, + {"tmpname", os_tmpname}, + {NULL, NULL} +}; + +/* }====================================================== */ + + + +LUALIB_API int luaopen_os (lua_State *L) { + luaL_register(L, LUA_OSLIBNAME, syslib); + return 1; +} + diff --git a/deps/lua/src/lparser.c b/deps/lua/src/lparser.c new file mode 100644 index 0000000..dda7488 --- /dev/null +++ b/deps/lua/src/lparser.c @@ -0,0 +1,1339 @@ +/* +** $Id: lparser.c,v 2.42.1.4 2011/10/21 19:31:42 roberto Exp $ +** Lua Parser +** See Copyright Notice in lua.h +*/ + + +#include + +#define lparser_c +#define LUA_CORE + +#include "lua.h" + +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "llex.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" + + + +#define hasmultret(k) ((k) == VCALL || (k) == VVARARG) + +#define getlocvar(fs, i) ((fs)->f->locvars[(fs)->actvar[i]]) + +#define luaY_checklimit(fs,v,l,m) if ((v)>(l)) errorlimit(fs,l,m) + + +/* +** nodes for block list (list of active blocks) +*/ +typedef struct BlockCnt { + struct BlockCnt *previous; /* chain */ + int breaklist; /* list of jumps out of this loop */ + lu_byte nactvar; /* # active locals outside the breakable structure */ + lu_byte upval; /* true if some variable in the block is an upvalue */ + lu_byte isbreakable; /* true if `block' is a loop */ +} BlockCnt; + + + +/* +** prototypes for recursive non-terminal functions +*/ +static void chunk (LexState *ls); +static void expr (LexState *ls, expdesc *v); + + +static void anchor_token (LexState *ls) { + if (ls->t.token == TK_NAME || ls->t.token == TK_STRING) { + TString *ts = ls->t.seminfo.ts; + luaX_newstring(ls, getstr(ts), ts->tsv.len); + } +} + + +static void error_expected (LexState *ls, int token) { + luaX_syntaxerror(ls, + luaO_pushfstring(ls->L, LUA_QS " expected", luaX_token2str(ls, token))); +} + + +static void errorlimit (FuncState *fs, int 
limit, const char *what) { + const char *msg = (fs->f->linedefined == 0) ? + luaO_pushfstring(fs->L, "main function has more than %d %s", limit, what) : + luaO_pushfstring(fs->L, "function at line %d has more than %d %s", + fs->f->linedefined, limit, what); + luaX_lexerror(fs->ls, msg, 0); +} + + +static int testnext (LexState *ls, int c) { + if (ls->t.token == c) { + luaX_next(ls); + return 1; + } + else return 0; +} + + +static void check (LexState *ls, int c) { + if (ls->t.token != c) + error_expected(ls, c); +} + +static void checknext (LexState *ls, int c) { + check(ls, c); + luaX_next(ls); +} + + +#define check_condition(ls,c,msg) { if (!(c)) luaX_syntaxerror(ls, msg); } + + + +static void check_match (LexState *ls, int what, int who, int where) { + if (!testnext(ls, what)) { + if (where == ls->linenumber) + error_expected(ls, what); + else { + luaX_syntaxerror(ls, luaO_pushfstring(ls->L, + LUA_QS " expected (to close " LUA_QS " at line %d)", + luaX_token2str(ls, what), luaX_token2str(ls, who), where)); + } + } +} + + +static TString *str_checkname (LexState *ls) { + TString *ts; + check(ls, TK_NAME); + ts = ls->t.seminfo.ts; + luaX_next(ls); + return ts; +} + + +static void init_exp (expdesc *e, expkind k, int i) { + e->f = e->t = NO_JUMP; + e->k = k; + e->u.s.info = i; +} + + +static void codestring (LexState *ls, expdesc *e, TString *s) { + init_exp(e, VK, luaK_stringK(ls->fs, s)); +} + + +static void checkname(LexState *ls, expdesc *e) { + codestring(ls, e, str_checkname(ls)); +} + + +static int registerlocalvar (LexState *ls, TString *varname) { + FuncState *fs = ls->fs; + Proto *f = fs->f; + int oldsize = f->sizelocvars; + luaM_growvector(ls->L, f->locvars, fs->nlocvars, f->sizelocvars, + LocVar, SHRT_MAX, "too many local variables"); + while (oldsize < f->sizelocvars) f->locvars[oldsize++].varname = NULL; + f->locvars[fs->nlocvars].varname = varname; + luaC_objbarrier(ls->L, f, varname); + return fs->nlocvars++; +} + + +#define new_localvarliteral(ls,v,n) \ + new_localvar(ls, luaX_newstring(ls, "" v, (sizeof(v)/sizeof(char))-1), n) + + +static void new_localvar (LexState *ls, TString *name, int n) { + FuncState *fs = ls->fs; + luaY_checklimit(fs, fs->nactvar+n+1, LUAI_MAXVARS, "local variables"); + fs->actvar[fs->nactvar+n] = cast(unsigned short, registerlocalvar(ls, name)); +} + + +static void adjustlocalvars (LexState *ls, int nvars) { + FuncState *fs = ls->fs; + fs->nactvar = cast_byte(fs->nactvar + nvars); + for (; nvars; nvars--) { + getlocvar(fs, fs->nactvar - nvars).startpc = fs->pc; + } +} + + +static void removevars (LexState *ls, int tolevel) { + FuncState *fs = ls->fs; + while (fs->nactvar > tolevel) + getlocvar(fs, --fs->nactvar).endpc = fs->pc; +} + + +static int indexupvalue (FuncState *fs, TString *name, expdesc *v) { + int i; + Proto *f = fs->f; + int oldsize = f->sizeupvalues; + for (i=0; i<f->nups; i++) { + if (fs->upvalues[i].k == v->k && fs->upvalues[i].info == v->u.s.info) { + lua_assert(f->upvalues[i] == name); + return i; + } + } + /* new one */ + luaY_checklimit(fs, f->nups + 1, LUAI_MAXUPVALUES, "upvalues"); + luaM_growvector(fs->L, f->upvalues, f->nups, f->sizeupvalues, + TString *, MAX_INT, ""); + while (oldsize < f->sizeupvalues) f->upvalues[oldsize++] = NULL; + f->upvalues[f->nups] = name; + luaC_objbarrier(fs->L, f, name); + lua_assert(v->k == VLOCAL || v->k == VUPVAL); + fs->upvalues[f->nups].k = cast_byte(v->k); + fs->upvalues[f->nups].info = cast_byte(v->u.s.info); + return f->nups++; +} + + +static int searchvar (FuncState *fs, TString *n) { + int 
i; + for (i=fs->nactvar-1; i >= 0; i--) { + if (n == getlocvar(fs, i).varname) + return i; + } + return -1; /* not found */ +} + + +static void markupval (FuncState *fs, int level) { + BlockCnt *bl = fs->bl; + while (bl && bl->nactvar > level) bl = bl->previous; + if (bl) bl->upval = 1; +} + + +static int singlevaraux (FuncState *fs, TString *n, expdesc *var, int base) { + if (fs == NULL) { /* no more levels? */ + init_exp(var, VGLOBAL, NO_REG); /* default is global variable */ + return VGLOBAL; + } + else { + int v = searchvar(fs, n); /* look up at current level */ + if (v >= 0) { + init_exp(var, VLOCAL, v); + if (!base) + markupval(fs, v); /* local will be used as an upval */ + return VLOCAL; + } + else { /* not found at current level; try upper one */ + if (singlevaraux(fs->prev, n, var, 0) == VGLOBAL) + return VGLOBAL; + var->u.s.info = indexupvalue(fs, n, var); /* else was LOCAL or UPVAL */ + var->k = VUPVAL; /* upvalue in this level */ + return VUPVAL; + } + } +} + + +static void singlevar (LexState *ls, expdesc *var) { + TString *varname = str_checkname(ls); + FuncState *fs = ls->fs; + if (singlevaraux(fs, varname, var, 1) == VGLOBAL) + var->u.s.info = luaK_stringK(fs, varname); /* info points to global name */ +} + + +static void adjust_assign (LexState *ls, int nvars, int nexps, expdesc *e) { + FuncState *fs = ls->fs; + int extra = nvars - nexps; + if (hasmultret(e->k)) { + extra++; /* includes call itself */ + if (extra < 0) extra = 0; + luaK_setreturns(fs, e, extra); /* last exp. provides the difference */ + if (extra > 1) luaK_reserveregs(fs, extra-1); + } + else { + if (e->k != VVOID) luaK_exp2nextreg(fs, e); /* close last expression */ + if (extra > 0) { + int reg = fs->freereg; + luaK_reserveregs(fs, extra); + luaK_nil(fs, reg, extra); + } + } +} + + +static void enterlevel (LexState *ls) { + if (++ls->L->nCcalls > LUAI_MAXCCALLS) + luaX_lexerror(ls, "chunk has too many syntax levels", 0); +} + + +#define leavelevel(ls) ((ls)->L->nCcalls--) + + +static void enterblock (FuncState *fs, BlockCnt *bl, lu_byte isbreakable) { + bl->breaklist = NO_JUMP; + bl->isbreakable = isbreakable; + bl->nactvar = fs->nactvar; + bl->upval = 0; + bl->previous = fs->bl; + fs->bl = bl; + lua_assert(fs->freereg == fs->nactvar); +} + + +static void leaveblock (FuncState *fs) { + BlockCnt *bl = fs->bl; + fs->bl = bl->previous; + removevars(fs->ls, bl->nactvar); + if (bl->upval) + luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0); + /* a block either controls scope or breaks (never both) */ + lua_assert(!bl->isbreakable || !bl->upval); + lua_assert(bl->nactvar == fs->nactvar); + fs->freereg = fs->nactvar; /* free registers */ + luaK_patchtohere(fs, bl->breaklist); +} + + +static void pushclosure (LexState *ls, FuncState *func, expdesc *v) { + FuncState *fs = ls->fs; + Proto *f = fs->f; + int oldsize = f->sizep; + int i; + luaM_growvector(ls->L, f->p, fs->np, f->sizep, Proto *, + MAXARG_Bx, "constant table overflow"); + while (oldsize < f->sizep) f->p[oldsize++] = NULL; + f->p[fs->np++] = func->f; + luaC_objbarrier(ls->L, f, func->f); + init_exp(v, VRELOCABLE, luaK_codeABx(fs, OP_CLOSURE, 0, fs->np-1)); + for (i=0; i<func->f->nups; i++) { + OpCode o = (func->upvalues[i].k == VLOCAL) ? 
OP_MOVE : OP_GETUPVAL; + luaK_codeABC(fs, o, 0, func->upvalues[i].info, 0); + } +} + + +static void open_func (LexState *ls, FuncState *fs) { + lua_State *L = ls->L; + Proto *f = luaF_newproto(L); + fs->f = f; + fs->prev = ls->fs; /* linked list of funcstates */ + fs->ls = ls; + fs->L = L; + ls->fs = fs; + fs->pc = 0; + fs->lasttarget = -1; + fs->jpc = NO_JUMP; + fs->freereg = 0; + fs->nk = 0; + fs->np = 0; + fs->nlocvars = 0; + fs->nactvar = 0; + fs->bl = NULL; + f->source = ls->source; + f->maxstacksize = 2; /* registers 0/1 are always valid */ + fs->h = luaH_new(L, 0, 0); + /* anchor table of constants and prototype (to avoid being collected) */ + sethvalue2s(L, L->top, fs->h); + incr_top(L); + setptvalue2s(L, L->top, f); + incr_top(L); +} + + +static void close_func (LexState *ls) { + lua_State *L = ls->L; + FuncState *fs = ls->fs; + Proto *f = fs->f; + removevars(ls, 0); + luaK_ret(fs, 0, 0); /* final return */ + luaM_reallocvector(L, f->code, f->sizecode, fs->pc, Instruction); + f->sizecode = fs->pc; + luaM_reallocvector(L, f->lineinfo, f->sizelineinfo, fs->pc, int); + f->sizelineinfo = fs->pc; + luaM_reallocvector(L, f->k, f->sizek, fs->nk, TValue); + f->sizek = fs->nk; + luaM_reallocvector(L, f->p, f->sizep, fs->np, Proto *); + f->sizep = fs->np; + luaM_reallocvector(L, f->locvars, f->sizelocvars, fs->nlocvars, LocVar); + f->sizelocvars = fs->nlocvars; + luaM_reallocvector(L, f->upvalues, f->sizeupvalues, f->nups, TString *); + f->sizeupvalues = f->nups; + lua_assert(luaG_checkcode(f)); + lua_assert(fs->bl == NULL); + ls->fs = fs->prev; + /* last token read was anchored in defunct function; must reanchor it */ + if (fs) anchor_token(ls); + L->top -= 2; /* remove table and prototype from the stack */ +} + + +Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) { + struct LexState lexstate; + struct FuncState funcstate; + lexstate.buff = buff; + luaX_setinput(L, &lexstate, z, luaS_new(L, name)); + open_func(&lexstate, &funcstate); + funcstate.f->is_vararg = VARARG_ISVARARG; /* main func. is always vararg */ + luaX_next(&lexstate); /* read first token */ + chunk(&lexstate); + check(&lexstate, TK_EOS); + close_func(&lexstate); + lua_assert(funcstate.prev == NULL); + lua_assert(funcstate.f->nups == 0); + lua_assert(lexstate.fs == NULL); + return funcstate.f; +} + + + +/*============================================================*/ +/* GRAMMAR RULES */ +/*============================================================*/ + + +static void field (LexState *ls, expdesc *v) { + /* field -> ['.' 
| ':'] NAME */ + FuncState *fs = ls->fs; + expdesc key; + luaK_exp2anyreg(fs, v); + luaX_next(ls); /* skip the dot or colon */ + checkname(ls, &key); + luaK_indexed(fs, v, &key); +} + + +static void yindex (LexState *ls, expdesc *v) { + /* index -> '[' expr ']' */ + luaX_next(ls); /* skip the '[' */ + expr(ls, v); + luaK_exp2val(ls->fs, v); + checknext(ls, ']'); +} + + +/* +** {====================================================================== +** Rules for Constructors +** ======================================================================= +*/ + + +struct ConsControl { + expdesc v; /* last list item read */ + expdesc *t; /* table descriptor */ + int nh; /* total number of `record' elements */ + int na; /* total number of array elements */ + int tostore; /* number of array elements pending to be stored */ +}; + + +static void recfield (LexState *ls, struct ConsControl *cc) { + /* recfield -> (NAME | `['exp1`]') = exp1 */ + FuncState *fs = ls->fs; + int reg = ls->fs->freereg; + expdesc key, val; + int rkkey; + if (ls->t.token == TK_NAME) { + luaY_checklimit(fs, cc->nh, MAX_INT, "items in a constructor"); + checkname(ls, &key); + } + else /* ls->t.token == '[' */ + yindex(ls, &key); + cc->nh++; + checknext(ls, '='); + rkkey = luaK_exp2RK(fs, &key); + expr(ls, &val); + luaK_codeABC(fs, OP_SETTABLE, cc->t->u.s.info, rkkey, luaK_exp2RK(fs, &val)); + fs->freereg = reg; /* free registers */ +} + + +static void closelistfield (FuncState *fs, struct ConsControl *cc) { + if (cc->v.k == VVOID) return; /* there is no list item */ + luaK_exp2nextreg(fs, &cc->v); + cc->v.k = VVOID; + if (cc->tostore == LFIELDS_PER_FLUSH) { + luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); /* flush */ + cc->tostore = 0; /* no more items pending */ + } +} + + +static void lastlistfield (FuncState *fs, struct ConsControl *cc) { + if (cc->tostore == 0) return; + if (hasmultret(cc->v.k)) { + luaK_setmultret(fs, &cc->v); + luaK_setlist(fs, cc->t->u.s.info, cc->na, LUA_MULTRET); + cc->na--; /* do not count last expression (unknown number of elements) */ + } + else { + if (cc->v.k != VVOID) + luaK_exp2nextreg(fs, &cc->v); + luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); + } +} + + +static void listfield (LexState *ls, struct ConsControl *cc) { + expr(ls, &cc->v); + luaY_checklimit(ls->fs, cc->na, MAX_INT, "items in a constructor"); + cc->na++; + cc->tostore++; +} + + +static void constructor (LexState *ls, expdesc *t) { + /* constructor -> ?? */ + FuncState *fs = ls->fs; + int line = ls->linenumber; + int pc = luaK_codeABC(fs, OP_NEWTABLE, 0, 0, 0); + struct ConsControl cc; + cc.na = cc.nh = cc.tostore = 0; + cc.t = t; + init_exp(t, VRELOCABLE, pc); + init_exp(&cc.v, VVOID, 0); /* no value (yet) */ + luaK_exp2nextreg(ls->fs, t); /* fix it at stack top (for gc) */ + checknext(ls, '{'); + do { + lua_assert(cc.v.k == VVOID || cc.tostore > 0); + if (ls->t.token == '}') break; + closelistfield(fs, &cc); + switch(ls->t.token) { + case TK_NAME: { /* may be listfields or recfields */ + luaX_lookahead(ls); + if (ls->lookahead.token != '=') /* expression? 
*/ + listfield(ls, &cc); + else + recfield(ls, &cc); + break; + } + case '[': { /* constructor_item -> recfield */ + recfield(ls, &cc); + break; + } + default: { /* constructor_part -> listfield */ + listfield(ls, &cc); + break; + } + } + } while (testnext(ls, ',') || testnext(ls, ';')); + check_match(ls, '}', '{', line); + lastlistfield(fs, &cc); + SETARG_B(fs->f->code[pc], luaO_int2fb(cc.na)); /* set initial array size */ + SETARG_C(fs->f->code[pc], luaO_int2fb(cc.nh)); /* set initial table size */ +} + +/* }====================================================================== */ + + + +static void parlist (LexState *ls) { + /* parlist -> [ param { `,' param } ] */ + FuncState *fs = ls->fs; + Proto *f = fs->f; + int nparams = 0; + f->is_vararg = 0; + if (ls->t.token != ')') { /* is `parlist' not empty? */ + do { + switch (ls->t.token) { + case TK_NAME: { /* param -> NAME */ + new_localvar(ls, str_checkname(ls), nparams++); + break; + } + case TK_DOTS: { /* param -> `...' */ + luaX_next(ls); +#if defined(LUA_COMPAT_VARARG) + /* use `arg' as default name */ + new_localvarliteral(ls, "arg", nparams++); + f->is_vararg = VARARG_HASARG | VARARG_NEEDSARG; +#endif + f->is_vararg |= VARARG_ISVARARG; + break; + } + default: luaX_syntaxerror(ls, " or " LUA_QL("...") " expected"); + } + } while (!f->is_vararg && testnext(ls, ',')); + } + adjustlocalvars(ls, nparams); + f->numparams = cast_byte(fs->nactvar - (f->is_vararg & VARARG_HASARG)); + luaK_reserveregs(fs, fs->nactvar); /* reserve register for parameters */ +} + + +static void body (LexState *ls, expdesc *e, int needself, int line) { + /* body -> `(' parlist `)' chunk END */ + FuncState new_fs; + open_func(ls, &new_fs); + new_fs.f->linedefined = line; + checknext(ls, '('); + if (needself) { + new_localvarliteral(ls, "self", 0); + adjustlocalvars(ls, 1); + } + parlist(ls); + checknext(ls, ')'); + chunk(ls); + new_fs.f->lastlinedefined = ls->linenumber; + check_match(ls, TK_END, TK_FUNCTION, line); + close_func(ls); + pushclosure(ls, &new_fs, e); +} + + +static int explist1 (LexState *ls, expdesc *v) { + /* explist1 -> expr { `,' expr } */ + int n = 1; /* at least one expression */ + expr(ls, v); + while (testnext(ls, ',')) { + luaK_exp2nextreg(ls->fs, v); + expr(ls, v); + n++; + } + return n; +} + + +static void funcargs (LexState *ls, expdesc *f) { + FuncState *fs = ls->fs; + expdesc args; + int base, nparams; + int line = ls->linenumber; + switch (ls->t.token) { + case '(': { /* funcargs -> `(' [ explist1 ] `)' */ + if (line != ls->lastline) + luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)"); + luaX_next(ls); + if (ls->t.token == ')') /* arg list is empty? 
*/ + args.k = VVOID; + else { + explist1(ls, &args); + luaK_setmultret(fs, &args); + } + check_match(ls, ')', '(', line); + break; + } + case '{': { /* funcargs -> constructor */ + constructor(ls, &args); + break; + } + case TK_STRING: { /* funcargs -> STRING */ + codestring(ls, &args, ls->t.seminfo.ts); + luaX_next(ls); /* must use `seminfo' before `next' */ + break; + } + default: { + luaX_syntaxerror(ls, "function arguments expected"); + return; + } + } + lua_assert(f->k == VNONRELOC); + base = f->u.s.info; /* base register for call */ + if (hasmultret(args.k)) + nparams = LUA_MULTRET; /* open call */ + else { + if (args.k != VVOID) + luaK_exp2nextreg(fs, &args); /* close last argument */ + nparams = fs->freereg - (base+1); + } + init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2)); + luaK_fixline(fs, line); + fs->freereg = base+1; /* call remove function and arguments and leaves + (unless changed) one result */ +} + + + + +/* +** {====================================================================== +** Expression parsing +** ======================================================================= +*/ + + +static void prefixexp (LexState *ls, expdesc *v) { + /* prefixexp -> NAME | '(' expr ')' */ + switch (ls->t.token) { + case '(': { + int line = ls->linenumber; + luaX_next(ls); + expr(ls, v); + check_match(ls, ')', '(', line); + luaK_dischargevars(ls->fs, v); + return; + } + case TK_NAME: { + singlevar(ls, v); + return; + } + default: { + luaX_syntaxerror(ls, "unexpected symbol"); + return; + } + } +} + + +static void primaryexp (LexState *ls, expdesc *v) { + /* primaryexp -> + prefixexp { `.' NAME | `[' exp `]' | `:' NAME funcargs | funcargs } */ + FuncState *fs = ls->fs; + prefixexp(ls, v); + for (;;) { + switch (ls->t.token) { + case '.': { /* field */ + field(ls, v); + break; + } + case '[': { /* `[' exp1 `]' */ + expdesc key; + luaK_exp2anyreg(fs, v); + yindex(ls, &key); + luaK_indexed(fs, v, &key); + break; + } + case ':': { /* `:' NAME funcargs */ + expdesc key; + luaX_next(ls); + checkname(ls, &key); + luaK_self(fs, v, &key); + funcargs(ls, v); + break; + } + case '(': case TK_STRING: case '{': { /* funcargs */ + luaK_exp2nextreg(fs, v); + funcargs(ls, v); + break; + } + default: return; + } + } +} + + +static void simpleexp (LexState *ls, expdesc *v) { + /* simpleexp -> NUMBER | STRING | NIL | true | false | ... 
| + constructor | FUNCTION body | primaryexp */ + switch (ls->t.token) { + case TK_NUMBER: { + init_exp(v, VKNUM, 0); + v->u.nval = ls->t.seminfo.r; + break; + } + case TK_STRING: { + codestring(ls, v, ls->t.seminfo.ts); + break; + } + case TK_NIL: { + init_exp(v, VNIL, 0); + break; + } + case TK_TRUE: { + init_exp(v, VTRUE, 0); + break; + } + case TK_FALSE: { + init_exp(v, VFALSE, 0); + break; + } + case TK_DOTS: { /* vararg */ + FuncState *fs = ls->fs; + check_condition(ls, fs->f->is_vararg, + "cannot use " LUA_QL("...") " outside a vararg function"); + fs->f->is_vararg &= ~VARARG_NEEDSARG; /* don't need 'arg' */ + init_exp(v, VVARARG, luaK_codeABC(fs, OP_VARARG, 0, 1, 0)); + break; + } + case '{': { /* constructor */ + constructor(ls, v); + return; + } + case TK_FUNCTION: { + luaX_next(ls); + body(ls, v, 0, ls->linenumber); + return; + } + default: { + primaryexp(ls, v); + return; + } + } + luaX_next(ls); +} + + +static UnOpr getunopr (int op) { + switch (op) { + case TK_NOT: return OPR_NOT; + case '-': return OPR_MINUS; + case '#': return OPR_LEN; + default: return OPR_NOUNOPR; + } +} + + +static BinOpr getbinopr (int op) { + switch (op) { + case '+': return OPR_ADD; + case '-': return OPR_SUB; + case '*': return OPR_MUL; + case '/': return OPR_DIV; + case '%': return OPR_MOD; + case '^': return OPR_POW; + case TK_CONCAT: return OPR_CONCAT; + case TK_NE: return OPR_NE; + case TK_EQ: return OPR_EQ; + case '<': return OPR_LT; + case TK_LE: return OPR_LE; + case '>': return OPR_GT; + case TK_GE: return OPR_GE; + case TK_AND: return OPR_AND; + case TK_OR: return OPR_OR; + default: return OPR_NOBINOPR; + } +} + + +static const struct { + lu_byte left; /* left priority for each binary operator */ + lu_byte right; /* right priority */ +} priority[] = { /* ORDER OPR */ + {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, /* `+' `-' `/' `%' */ + {10, 9}, {5, 4}, /* power and concat (right associative) */ + {3, 3}, {3, 3}, /* equality and inequality */ + {3, 3}, {3, 3}, {3, 3}, {3, 3}, /* order */ + {2, 2}, {1, 1} /* logical (and/or) */ +}; + +#define UNARY_PRIORITY 8 /* priority for unary operators */ + + +/* +** subexpr -> (simpleexp | unop subexpr) { binop subexpr } +** where `binop' is any binary operator with a priority higher than `limit' +*/ +static BinOpr subexpr (LexState *ls, expdesc *v, unsigned int limit) { + BinOpr op; + UnOpr uop; + enterlevel(ls); + uop = getunopr(ls->t.token); + if (uop != OPR_NOUNOPR) { + luaX_next(ls); + subexpr(ls, v, UNARY_PRIORITY); + luaK_prefix(ls->fs, uop, v); + } + else simpleexp(ls, v); + /* expand while operators have priorities higher than `limit' */ + op = getbinopr(ls->t.token); + while (op != OPR_NOBINOPR && priority[op].left > limit) { + expdesc v2; + BinOpr nextop; + luaX_next(ls); + luaK_infix(ls->fs, op, v); + /* read sub-expression with higher priority */ + nextop = subexpr(ls, &v2, priority[op].right); + luaK_posfix(ls->fs, op, v, &v2); + op = nextop; + } + leavelevel(ls); + return op; /* return first untreated operator */ +} + + +static void expr (LexState *ls, expdesc *v) { + subexpr(ls, v, 0); +} + +/* }==================================================================== */ + + + +/* +** {====================================================================== +** Rules for Statements +** ======================================================================= +*/ + + +static int block_follow (int token) { + switch (token) { + case TK_ELSE: case TK_ELSEIF: case TK_END: + case TK_UNTIL: case TK_EOS: + return 1; + default: return 0; + } +} + + +static 
void block (LexState *ls) { + /* block -> chunk */ + FuncState *fs = ls->fs; + BlockCnt bl; + enterblock(fs, &bl, 0); + chunk(ls); + lua_assert(bl.breaklist == NO_JUMP); + leaveblock(fs); +} + + +/* +** structure to chain all variables in the left-hand side of an +** assignment +*/ +struct LHS_assign { + struct LHS_assign *prev; + expdesc v; /* variable (global, local, upvalue, or indexed) */ +}; + + +/* +** check whether, in an assignment to a local variable, the local variable +** is needed in a previous assignment (to a table). If so, save original +** local value in a safe place and use this safe copy in the previous +** assignment. +*/ +static void check_conflict (LexState *ls, struct LHS_assign *lh, expdesc *v) { + FuncState *fs = ls->fs; + int extra = fs->freereg; /* eventual position to save local variable */ + int conflict = 0; + for (; lh; lh = lh->prev) { + if (lh->v.k == VINDEXED) { + if (lh->v.u.s.info == v->u.s.info) { /* conflict? */ + conflict = 1; + lh->v.u.s.info = extra; /* previous assignment will use safe copy */ + } + if (lh->v.u.s.aux == v->u.s.info) { /* conflict? */ + conflict = 1; + lh->v.u.s.aux = extra; /* previous assignment will use safe copy */ + } + } + } + if (conflict) { + luaK_codeABC(fs, OP_MOVE, fs->freereg, v->u.s.info, 0); /* make copy */ + luaK_reserveregs(fs, 1); + } +} + + +static void assignment (LexState *ls, struct LHS_assign *lh, int nvars) { + expdesc e; + check_condition(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, + "syntax error"); + if (testnext(ls, ',')) { /* assignment -> `,' primaryexp assignment */ + struct LHS_assign nv; + nv.prev = lh; + primaryexp(ls, &nv.v); + if (nv.v.k == VLOCAL) + check_conflict(ls, lh, &nv.v); + luaY_checklimit(ls->fs, nvars, LUAI_MAXCCALLS - ls->L->nCcalls, + "variables in assignment"); + assignment(ls, &nv, nvars+1); + } + else { /* assignment -> `=' explist1 */ + int nexps; + checknext(ls, '='); + nexps = explist1(ls, &e); + if (nexps != nvars) { + adjust_assign(ls, nvars, nexps, &e); + if (nexps > nvars) + ls->fs->freereg -= nexps - nvars; /* remove extra values */ + } + else { + luaK_setoneret(ls->fs, &e); /* close last expression */ + luaK_storevar(ls->fs, &lh->v, &e); + return; /* avoid default */ + } + } + init_exp(&e, VNONRELOC, ls->fs->freereg-1); /* default assignment */ + luaK_storevar(ls->fs, &lh->v, &e); +} + + +static int cond (LexState *ls) { + /* cond -> exp */ + expdesc v; + expr(ls, &v); /* read condition */ + if (v.k == VNIL) v.k = VFALSE; /* `falses' are all equal here */ + luaK_goiftrue(ls->fs, &v); + return v.f; +} + + +static void breakstat (LexState *ls) { + FuncState *fs = ls->fs; + BlockCnt *bl = fs->bl; + int upval = 0; + while (bl && !bl->isbreakable) { + upval |= bl->upval; + bl = bl->previous; + } + if (!bl) + luaX_syntaxerror(ls, "no loop to break"); + if (upval) + luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0); + luaK_concat(fs, &bl->breaklist, luaK_jump(fs)); +} + + +static void whilestat (LexState *ls, int line) { + /* whilestat -> WHILE cond DO block END */ + FuncState *fs = ls->fs; + int whileinit; + int condexit; + BlockCnt bl; + luaX_next(ls); /* skip WHILE */ + whileinit = luaK_getlabel(fs); + condexit = cond(ls); + enterblock(fs, &bl, 1); + checknext(ls, TK_DO); + block(ls); + luaK_patchlist(fs, luaK_jump(fs), whileinit); + check_match(ls, TK_END, TK_WHILE, line); + leaveblock(fs); + luaK_patchtohere(fs, condexit); /* false conditions finish the loop */ +} + + +static void repeatstat (LexState *ls, int line) { + /* repeatstat -> REPEAT block UNTIL cond */ + int condexit; 
+ FuncState *fs = ls->fs; + int repeat_init = luaK_getlabel(fs); + BlockCnt bl1, bl2; + enterblock(fs, &bl1, 1); /* loop block */ + enterblock(fs, &bl2, 0); /* scope block */ + luaX_next(ls); /* skip REPEAT */ + chunk(ls); + check_match(ls, TK_UNTIL, TK_REPEAT, line); + condexit = cond(ls); /* read condition (inside scope block) */ + if (!bl2.upval) { /* no upvalues? */ + leaveblock(fs); /* finish scope */ + luaK_patchlist(ls->fs, condexit, repeat_init); /* close the loop */ + } + else { /* complete semantics when there are upvalues */ + breakstat(ls); /* if condition then break */ + luaK_patchtohere(ls->fs, condexit); /* else... */ + leaveblock(fs); /* finish scope... */ + luaK_patchlist(ls->fs, luaK_jump(fs), repeat_init); /* and repeat */ + } + leaveblock(fs); /* finish loop */ +} + + +static int exp1 (LexState *ls) { + expdesc e; + int k; + expr(ls, &e); + k = e.k; + luaK_exp2nextreg(ls->fs, &e); + return k; +} + + +static void forbody (LexState *ls, int base, int line, int nvars, int isnum) { + /* forbody -> DO block */ + BlockCnt bl; + FuncState *fs = ls->fs; + int prep, endfor; + adjustlocalvars(ls, 3); /* control variables */ + checknext(ls, TK_DO); + prep = isnum ? luaK_codeAsBx(fs, OP_FORPREP, base, NO_JUMP) : luaK_jump(fs); + enterblock(fs, &bl, 0); /* scope for declared variables */ + adjustlocalvars(ls, nvars); + luaK_reserveregs(fs, nvars); + block(ls); + leaveblock(fs); /* end of scope for declared variables */ + luaK_patchtohere(fs, prep); + endfor = (isnum) ? luaK_codeAsBx(fs, OP_FORLOOP, base, NO_JUMP) : + luaK_codeABC(fs, OP_TFORLOOP, base, 0, nvars); + luaK_fixline(fs, line); /* pretend that `OP_FOR' starts the loop */ + luaK_patchlist(fs, (isnum ? endfor : luaK_jump(fs)), prep + 1); +} + + +static void fornum (LexState *ls, TString *varname, int line) { + /* fornum -> NAME = exp1,exp1[,exp1] forbody */ + FuncState *fs = ls->fs; + int base = fs->freereg; + new_localvarliteral(ls, "(for index)", 0); + new_localvarliteral(ls, "(for limit)", 1); + new_localvarliteral(ls, "(for step)", 2); + new_localvar(ls, varname, 3); + checknext(ls, '='); + exp1(ls); /* initial value */ + checknext(ls, ','); + exp1(ls); /* limit */ + if (testnext(ls, ',')) + exp1(ls); /* optional step */ + else { /* default step = 1 */ + luaK_codeABx(fs, OP_LOADK, fs->freereg, luaK_numberK(fs, 1)); + luaK_reserveregs(fs, 1); + } + forbody(ls, base, line, 1, 1); +} + + +static void forlist (LexState *ls, TString *indexname) { + /* forlist -> NAME {,NAME} IN explist1 forbody */ + FuncState *fs = ls->fs; + expdesc e; + int nvars = 0; + int line; + int base = fs->freereg; + /* create control variables */ + new_localvarliteral(ls, "(for generator)", nvars++); + new_localvarliteral(ls, "(for state)", nvars++); + new_localvarliteral(ls, "(for control)", nvars++); + /* create declared variables */ + new_localvar(ls, indexname, nvars++); + while (testnext(ls, ',')) + new_localvar(ls, str_checkname(ls), nvars++); + checknext(ls, TK_IN); + line = ls->linenumber; + adjust_assign(ls, 3, explist1(ls, &e), &e); + luaK_checkstack(fs, 3); /* extra space to call generator */ + forbody(ls, base, line, nvars - 3, 0); +} + + +static void forstat (LexState *ls, int line) { + /* forstat -> FOR (fornum | forlist) END */ + FuncState *fs = ls->fs; + TString *varname; + BlockCnt bl; + enterblock(fs, &bl, 1); /* scope for loop and control variables */ + luaX_next(ls); /* skip `for' */ + varname = str_checkname(ls); /* first variable name */ + switch (ls->t.token) { + case '=': fornum(ls, varname, line); break; + case ',': case 
TK_IN: forlist(ls, varname); break; + default: luaX_syntaxerror(ls, LUA_QL("=") " or " LUA_QL("in") " expected"); + } + check_match(ls, TK_END, TK_FOR, line); + leaveblock(fs); /* loop scope (`break' jumps to this point) */ +} + + +static int test_then_block (LexState *ls) { + /* test_then_block -> [IF | ELSEIF] cond THEN block */ + int condexit; + luaX_next(ls); /* skip IF or ELSEIF */ + condexit = cond(ls); + checknext(ls, TK_THEN); + block(ls); /* `then' part */ + return condexit; +} + + +static void ifstat (LexState *ls, int line) { + /* ifstat -> IF cond THEN block {ELSEIF cond THEN block} [ELSE block] END */ + FuncState *fs = ls->fs; + int flist; + int escapelist = NO_JUMP; + flist = test_then_block(ls); /* IF cond THEN block */ + while (ls->t.token == TK_ELSEIF) { + luaK_concat(fs, &escapelist, luaK_jump(fs)); + luaK_patchtohere(fs, flist); + flist = test_then_block(ls); /* ELSEIF cond THEN block */ + } + if (ls->t.token == TK_ELSE) { + luaK_concat(fs, &escapelist, luaK_jump(fs)); + luaK_patchtohere(fs, flist); + luaX_next(ls); /* skip ELSE (after patch, for correct line info) */ + block(ls); /* `else' part */ + } + else + luaK_concat(fs, &escapelist, flist); + luaK_patchtohere(fs, escapelist); + check_match(ls, TK_END, TK_IF, line); +} + + +static void localfunc (LexState *ls) { + expdesc v, b; + FuncState *fs = ls->fs; + new_localvar(ls, str_checkname(ls), 0); + init_exp(&v, VLOCAL, fs->freereg); + luaK_reserveregs(fs, 1); + adjustlocalvars(ls, 1); + body(ls, &b, 0, ls->linenumber); + luaK_storevar(fs, &v, &b); + /* debug information will only see the variable after this point! */ + getlocvar(fs, fs->nactvar - 1).startpc = fs->pc; +} + + +static void localstat (LexState *ls) { + /* stat -> LOCAL NAME {`,' NAME} [`=' explist1] */ + int nvars = 0; + int nexps; + expdesc e; + do { + new_localvar(ls, str_checkname(ls), nvars++); + } while (testnext(ls, ',')); + if (testnext(ls, '=')) + nexps = explist1(ls, &e); + else { + e.k = VVOID; + nexps = 0; + } + adjust_assign(ls, nvars, nexps, &e); + adjustlocalvars(ls, nvars); +} + + +static int funcname (LexState *ls, expdesc *v) { + /* funcname -> NAME {field} [`:' NAME] */ + int needself = 0; + singlevar(ls, v); + while (ls->t.token == '.') + field(ls, v); + if (ls->t.token == ':') { + needself = 1; + field(ls, v); + } + return needself; +} + + +static void funcstat (LexState *ls, int line) { + /* funcstat -> FUNCTION funcname body */ + int needself; + expdesc v, b; + luaX_next(ls); /* skip FUNCTION */ + needself = funcname(ls, &v); + body(ls, &b, needself, line); + luaK_storevar(ls->fs, &v, &b); + luaK_fixline(ls->fs, line); /* definition `happens' in the first line */ +} + + +static void exprstat (LexState *ls) { + /* stat -> func | assignment */ + FuncState *fs = ls->fs; + struct LHS_assign v; + primaryexp(ls, &v.v); + if (v.v.k == VCALL) /* stat -> func */ + SETARG_C(getcode(fs, &v.v), 1); /* call statement uses no results */ + else { /* stat -> assignment */ + v.prev = NULL; + assignment(ls, &v, 1); + } +} + + +static void retstat (LexState *ls) { + /* stat -> RETURN explist */ + FuncState *fs = ls->fs; + expdesc e; + int first, nret; /* registers with returned values */ + luaX_next(ls); /* skip RETURN */ + if (block_follow(ls->t.token) || ls->t.token == ';') + first = nret = 0; /* return no values */ + else { + nret = explist1(ls, &e); /* optional return values */ + if (hasmultret(e.k)) { + luaK_setmultret(fs, &e); + if (e.k == VCALL && nret == 1) { /* tail call? 
*/ + SET_OPCODE(getcode(fs,&e), OP_TAILCALL); + lua_assert(GETARG_A(getcode(fs,&e)) == fs->nactvar); + } + first = fs->nactvar; + nret = LUA_MULTRET; /* return all values */ + } + else { + if (nret == 1) /* only one single value? */ + first = luaK_exp2anyreg(fs, &e); + else { + luaK_exp2nextreg(fs, &e); /* values must go to the `stack' */ + first = fs->nactvar; /* return all `active' values */ + lua_assert(nret == fs->freereg - first); + } + } + } + luaK_ret(fs, first, nret); +} + + +static int statement (LexState *ls) { + int line = ls->linenumber; /* may be needed for error messages */ + switch (ls->t.token) { + case TK_IF: { /* stat -> ifstat */ + ifstat(ls, line); + return 0; + } + case TK_WHILE: { /* stat -> whilestat */ + whilestat(ls, line); + return 0; + } + case TK_DO: { /* stat -> DO block END */ + luaX_next(ls); /* skip DO */ + block(ls); + check_match(ls, TK_END, TK_DO, line); + return 0; + } + case TK_FOR: { /* stat -> forstat */ + forstat(ls, line); + return 0; + } + case TK_REPEAT: { /* stat -> repeatstat */ + repeatstat(ls, line); + return 0; + } + case TK_FUNCTION: { + funcstat(ls, line); /* stat -> funcstat */ + return 0; + } + case TK_LOCAL: { /* stat -> localstat */ + luaX_next(ls); /* skip LOCAL */ + if (testnext(ls, TK_FUNCTION)) /* local function? */ + localfunc(ls); + else + localstat(ls); + return 0; + } + case TK_RETURN: { /* stat -> retstat */ + retstat(ls); + return 1; /* must be last statement */ + } + case TK_BREAK: { /* stat -> breakstat */ + luaX_next(ls); /* skip BREAK */ + breakstat(ls); + return 1; /* must be last statement */ + } + default: { + exprstat(ls); + return 0; /* to avoid warnings */ + } + } +} + + +static void chunk (LexState *ls) { + /* chunk -> { stat [`;'] } */ + int islast = 0; + enterlevel(ls); + while (!islast && !block_follow(ls->t.token)) { + islast = statement(ls); + testnext(ls, ';'); + lua_assert(ls->fs->f->maxstacksize >= ls->fs->freereg && + ls->fs->freereg >= ls->fs->nactvar); + ls->fs->freereg = ls->fs->nactvar; /* free registers */ + } + leavelevel(ls); +} + +/* }====================================================================== */ diff --git a/deps/lua/src/lparser.h b/deps/lua/src/lparser.h new file mode 100644 index 0000000..18836af --- /dev/null +++ b/deps/lua/src/lparser.h @@ -0,0 +1,82 @@ +/* +** $Id: lparser.h,v 1.57.1.1 2007/12/27 13:02:25 roberto Exp $ +** Lua Parser +** See Copyright Notice in lua.h +*/ + +#ifndef lparser_h +#define lparser_h + +#include "llimits.h" +#include "lobject.h" +#include "lzio.h" + + +/* +** Expression descriptor +*/ + +typedef enum { + VVOID, /* no value */ + VNIL, + VTRUE, + VFALSE, + VK, /* info = index of constant in `k' */ + VKNUM, /* nval = numerical value */ + VLOCAL, /* info = local register */ + VUPVAL, /* info = index of upvalue in `upvalues' */ + VGLOBAL, /* info = index of table; aux = index of global name in `k' */ + VINDEXED, /* info = table register; aux = index register (or `k') */ + VJMP, /* info = instruction pc */ + VRELOCABLE, /* info = instruction pc */ + VNONRELOC, /* info = result register */ + VCALL, /* info = instruction pc */ + VVARARG /* info = instruction pc */ +} expkind; + +typedef struct expdesc { + expkind k; + union { + struct { int info, aux; } s; + lua_Number nval; + } u; + int t; /* patch list of `exit when true' */ + int f; /* patch list of `exit when false' */ +} expdesc; + + +typedef struct upvaldesc { + lu_byte k; + lu_byte info; +} upvaldesc; + + +struct BlockCnt; /* defined in lparser.c */ + + +/* state needed to generate code for a given function 
*/ +typedef struct FuncState { + Proto *f; /* current function header */ + Table *h; /* table to find (and reuse) elements in `k' */ + struct FuncState *prev; /* enclosing function */ + struct LexState *ls; /* lexical state */ + struct lua_State *L; /* copy of the Lua state */ + struct BlockCnt *bl; /* chain of current blocks */ + int pc; /* next position to code (equivalent to `ncode') */ + int lasttarget; /* `pc' of last `jump target' */ + int jpc; /* list of pending jumps to `pc' */ + int freereg; /* first free register */ + int nk; /* number of elements in `k' */ + int np; /* number of elements in `p' */ + short nlocvars; /* number of elements in `locvars' */ + lu_byte nactvar; /* number of active local variables */ + upvaldesc upvalues[LUAI_MAXUPVALUES]; /* upvalues */ + unsigned short actvar[LUAI_MAXVARS]; /* declared-variable stack */ +} FuncState; + + +LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, + const char *name); + + +#endif diff --git a/deps/lua/src/lstate.c b/deps/lua/src/lstate.c new file mode 100644 index 0000000..4313b83 --- /dev/null +++ b/deps/lua/src/lstate.c @@ -0,0 +1,214 @@ +/* +** $Id: lstate.c,v 2.36.1.2 2008/01/03 15:20:39 roberto Exp $ +** Global State +** See Copyright Notice in lua.h +*/ + + +#include + +#define lstate_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "llex.h" +#include "lmem.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + +#define state_size(x) (sizeof(x) + LUAI_EXTRASPACE) +#define fromstate(l) (cast(lu_byte *, (l)) - LUAI_EXTRASPACE) +#define tostate(l) (cast(lua_State *, cast(lu_byte *, l) + LUAI_EXTRASPACE)) + + +/* +** Main thread combines a thread state and the global state +*/ +typedef struct LG { + lua_State l; + global_State g; +} LG; + + + +static void stack_init (lua_State *L1, lua_State *L) { + /* initialize CallInfo array */ + L1->base_ci = luaM_newvector(L, BASIC_CI_SIZE, CallInfo); + L1->ci = L1->base_ci; + L1->size_ci = BASIC_CI_SIZE; + L1->end_ci = L1->base_ci + L1->size_ci - 1; + /* initialize stack array */ + L1->stack = luaM_newvector(L, BASIC_STACK_SIZE + EXTRA_STACK, TValue); + L1->stacksize = BASIC_STACK_SIZE + EXTRA_STACK; + L1->top = L1->stack; + L1->stack_last = L1->stack+(L1->stacksize - EXTRA_STACK)-1; + /* initialize first ci */ + L1->ci->func = L1->top; + setnilvalue(L1->top++); /* `function' entry for this `ci' */ + L1->base = L1->ci->base = L1->top; + L1->ci->top = L1->top + LUA_MINSTACK; +} + + +static void freestack (lua_State *L, lua_State *L1) { + luaM_freearray(L, L1->base_ci, L1->size_ci, CallInfo); + luaM_freearray(L, L1->stack, L1->stacksize, TValue); +} + + +/* +** open parts that may cause memory-allocation errors +*/ +static void f_luaopen (lua_State *L, void *ud) { + global_State *g = G(L); + UNUSED(ud); + stack_init(L, L); /* init stack */ + sethvalue(L, gt(L), luaH_new(L, 0, 2)); /* table of globals */ + sethvalue(L, registry(L), luaH_new(L, 0, 2)); /* registry */ + luaS_resize(L, MINSTRTABSIZE); /* initial size of string table */ + luaT_init(L); + luaX_init(L); + luaS_fix(luaS_newliteral(L, MEMERRMSG)); + g->GCthreshold = 4*g->totalbytes; +} + + +static void preinit_state (lua_State *L, global_State *g) { + G(L) = g; + L->stack = NULL; + L->stacksize = 0; + L->errorJmp = NULL; + L->hook = NULL; + L->hookmask = 0; + L->basehookcount = 0; + L->allowhook = 1; + resethookcount(L); + L->openupval = NULL; + L->size_ci = 0; + L->nCcalls = L->baseCcalls = 0; + 
L->status = 0; + L->base_ci = L->ci = NULL; + L->savedpc = NULL; + L->errfunc = 0; + setnilvalue(gt(L)); +} + + +static void close_state (lua_State *L) { + global_State *g = G(L); + luaF_close(L, L->stack); /* close all upvalues for this thread */ + luaC_freeall(L); /* collect all objects */ + lua_assert(g->rootgc == obj2gco(L)); + lua_assert(g->strt.nuse == 0); + luaM_freearray(L, G(L)->strt.hash, G(L)->strt.size, TString *); + luaZ_freebuffer(L, &g->buff); + freestack(L, L); + lua_assert(g->totalbytes == sizeof(LG)); + (*g->frealloc)(g->ud, fromstate(L), state_size(LG), 0); +} + + +lua_State *luaE_newthread (lua_State *L) { + lua_State *L1 = tostate(luaM_malloc(L, state_size(lua_State))); + luaC_link(L, obj2gco(L1), LUA_TTHREAD); + preinit_state(L1, G(L)); + stack_init(L1, L); /* init stack */ + setobj2n(L, gt(L1), gt(L)); /* share table of globals */ + L1->hookmask = L->hookmask; + L1->basehookcount = L->basehookcount; + L1->hook = L->hook; + resethookcount(L1); + lua_assert(iswhite(obj2gco(L1))); + return L1; +} + + +void luaE_freethread (lua_State *L, lua_State *L1) { + luaF_close(L1, L1->stack); /* close all upvalues for this thread */ + lua_assert(L1->openupval == NULL); + luai_userstatefree(L1); + freestack(L, L1); + luaM_freemem(L, fromstate(L1), state_size(lua_State)); +} + + +LUA_API lua_State *lua_newstate (lua_Alloc f, void *ud) { + int i; + lua_State *L; + global_State *g; + void *l = (*f)(ud, NULL, 0, state_size(LG)); + if (l == NULL) return NULL; + L = tostate(l); + g = &((LG *)L)->g; + L->next = NULL; + L->tt = LUA_TTHREAD; + g->currentwhite = bit2mask(WHITE0BIT, FIXEDBIT); + L->marked = luaC_white(g); + set2bits(L->marked, FIXEDBIT, SFIXEDBIT); + preinit_state(L, g); + g->frealloc = f; + g->ud = ud; + g->mainthread = L; + g->uvhead.u.l.prev = &g->uvhead; + g->uvhead.u.l.next = &g->uvhead; + g->GCthreshold = 0; /* mark it as unfinished state */ + g->strt.size = 0; + g->strt.nuse = 0; + g->strt.hash = NULL; + setnilvalue(registry(L)); + luaZ_initbuffer(L, &g->buff); + g->panic = NULL; + g->gcstate = GCSpause; + g->rootgc = obj2gco(L); + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + g->gray = NULL; + g->grayagain = NULL; + g->weak = NULL; + g->tmudata = NULL; + g->totalbytes = sizeof(LG); + g->gcpause = LUAI_GCPAUSE; + g->gcstepmul = LUAI_GCMUL; + g->gcdept = 0; + for (i=0; i<NUM_TAGS; i++) g->mt[i] = NULL; + if (luaD_rawrunprotected(L, f_luaopen, NULL) != 0) { + /* memory allocation error: free partial state */ + close_state(L); + L = NULL; + } + else + luai_userstateopen(L); + return L; +} + + +static void callallgcTM (lua_State *L, void *ud) { + UNUSED(ud); + luaC_callGCTM(L); /* call GC metamethods for all udata */ +} + + +LUA_API void lua_close (lua_State *L) { + L = G(L)->mainthread; /* only the main thread can be closed */ + lua_lock(L); + luaF_close(L, L->stack); /* close all upvalues for this thread */ + luaC_separateudata(L, 1); /* separate udata that have GC metamethods */ + L->errfunc = 0; /* no error function during GC metamethods */ + do { /* repeat until no more errors */ + L->ci = L->base_ci; + L->base = L->top = L->ci->base; + L->nCcalls = L->baseCcalls = 0; + } while (luaD_rawrunprotected(L, callallgcTM, NULL) != 0); + lua_assert(G(L)->tmudata == NULL); + luai_userstateclose(L); + close_state(L); +} + diff --git a/deps/lua/src/lstate.h b/deps/lua/src/lstate.h new file mode 100644 index 0000000..3bc575b --- /dev/null +++ b/deps/lua/src/lstate.h @@ -0,0 +1,169 @@ +/* +** $Id: lstate.h,v 2.24.1.2 2008/01/03 15:20:39 roberto Exp $ +** Global State +** See Copyright Notice in 
lua.h +*/ + +#ifndef lstate_h +#define lstate_h + +#include "lua.h" + +#include "lobject.h" +#include "ltm.h" +#include "lzio.h" + + + +struct lua_longjmp; /* defined in ldo.c */ + + +/* table of globals */ +#define gt(L) (&L->l_gt) + +/* registry */ +#define registry(L) (&G(L)->l_registry) + + +/* extra stack space to handle TM calls and some other extras */ +#define EXTRA_STACK 5 + + +#define BASIC_CI_SIZE 8 + +#define BASIC_STACK_SIZE (2*LUA_MINSTACK) + + + +typedef struct stringtable { + GCObject **hash; + lu_int32 nuse; /* number of elements */ + int size; +} stringtable; + + +/* +** informations about a call +*/ +typedef struct CallInfo { + StkId base; /* base for this function */ + StkId func; /* function index in the stack */ + StkId top; /* top for this function */ + const Instruction *savedpc; + int nresults; /* expected number of results from this function */ + int tailcalls; /* number of tail calls lost under this entry */ +} CallInfo; + + + +#define curr_func(L) (clvalue(L->ci->func)) +#define ci_func(ci) (clvalue((ci)->func)) +#define f_isLua(ci) (!ci_func(ci)->c.isC) +#define isLua(ci) (ttisfunction((ci)->func) && f_isLua(ci)) + + +/* +** `global state', shared by all threads of this state +*/ +typedef struct global_State { + stringtable strt; /* hash table for strings */ + lua_Alloc frealloc; /* function to reallocate memory */ + void *ud; /* auxiliary data to `frealloc' */ + lu_byte currentwhite; + lu_byte gcstate; /* state of garbage collector */ + int sweepstrgc; /* position of sweep in `strt' */ + GCObject *rootgc; /* list of all collectable objects */ + GCObject **sweepgc; /* position of sweep in `rootgc' */ + GCObject *gray; /* list of gray objects */ + GCObject *grayagain; /* list of objects to be traversed atomically */ + GCObject *weak; /* list of weak tables (to be cleared) */ + GCObject *tmudata; /* last element of list of userdata to be GC */ + Mbuffer buff; /* temporary buffer for string concatentation */ + lu_mem GCthreshold; + lu_mem totalbytes; /* number of bytes currently allocated */ + lu_mem estimate; /* an estimate of number of bytes actually in use */ + lu_mem gcdept; /* how much GC is `behind schedule' */ + int gcpause; /* size of pause between successive GCs */ + int gcstepmul; /* GC `granularity' */ + lua_CFunction panic; /* to be called in unprotected errors */ + TValue l_registry; + struct lua_State *mainthread; + UpVal uvhead; /* head of double-linked list of all open upvalues */ + struct Table *mt[NUM_TAGS]; /* metatables for basic types */ + TString *tmname[TM_N]; /* array with tag-method names */ +} global_State; + + +/* +** `per thread' state +*/ +struct lua_State { + CommonHeader; + lu_byte status; + StkId top; /* first free slot in the stack */ + StkId base; /* base of current function */ + global_State *l_G; + CallInfo *ci; /* call info for current function */ + const Instruction *savedpc; /* `savedpc' of current function */ + StkId stack_last; /* last free slot in the stack */ + StkId stack; /* stack base */ + CallInfo *end_ci; /* points after end of ci array*/ + CallInfo *base_ci; /* array of CallInfo's */ + int stacksize; + int size_ci; /* size of array `base_ci' */ + unsigned short nCcalls; /* number of nested C calls */ + unsigned short baseCcalls; /* nested C calls when resuming coroutine */ + lu_byte hookmask; + lu_byte allowhook; + int basehookcount; + int hookcount; + lua_Hook hook; + TValue l_gt; /* table of globals */ + TValue env; /* temporary place for environments */ + GCObject *openupval; /* list of open upvalues in this 
stack */ + GCObject *gclist; + struct lua_longjmp *errorJmp; /* current error recover point */ + ptrdiff_t errfunc; /* current error handling function (stack index) */ +}; + + +#define G(L) (L->l_G) + + +/* +** Union of all collectable objects +*/ +union GCObject { + GCheader gch; + union TString ts; + union Udata u; + union Closure cl; + struct Table h; + struct Proto p; + struct UpVal uv; + struct lua_State th; /* thread */ +}; + + +/* macros to convert a GCObject into a specific value */ +#define rawgco2ts(o) check_exp((o)->gch.tt == LUA_TSTRING, &((o)->ts)) +#define gco2ts(o) (&rawgco2ts(o)->tsv) +#define rawgco2u(o) check_exp((o)->gch.tt == LUA_TUSERDATA, &((o)->u)) +#define gco2u(o) (&rawgco2u(o)->uv) +#define gco2cl(o) check_exp((o)->gch.tt == LUA_TFUNCTION, &((o)->cl)) +#define gco2h(o) check_exp((o)->gch.tt == LUA_TTABLE, &((o)->h)) +#define gco2p(o) check_exp((o)->gch.tt == LUA_TPROTO, &((o)->p)) +#define gco2uv(o) check_exp((o)->gch.tt == LUA_TUPVAL, &((o)->uv)) +#define ngcotouv(o) \ + check_exp((o) == NULL || (o)->gch.tt == LUA_TUPVAL, &((o)->uv)) +#define gco2th(o) check_exp((o)->gch.tt == LUA_TTHREAD, &((o)->th)) + +/* macro to convert any Lua object into a GCObject */ +#define obj2gco(v) (cast(GCObject *, (v))) + + +LUAI_FUNC lua_State *luaE_newthread (lua_State *L); +LUAI_FUNC void luaE_freethread (lua_State *L, lua_State *L1); + +#endif + diff --git a/deps/lua/src/lstring.c b/deps/lua/src/lstring.c new file mode 100644 index 0000000..6a825f7 --- /dev/null +++ b/deps/lua/src/lstring.c @@ -0,0 +1,111 @@ +/* +** $Id: lstring.c,v 2.8.1.1 2007/12/27 13:02:25 roberto Exp $ +** String table (keeps all strings handled by Lua) +** See Copyright Notice in lua.h +*/ + + +#include <string.h> + +#define lstring_c +#define LUA_CORE + +#include "lua.h" + +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" + + + +void luaS_resize (lua_State *L, int newsize) { + GCObject **newhash; + stringtable *tb; + int i; + if (G(L)->gcstate == GCSsweepstring) + return; /* cannot resize during GC traverse */ + newhash = luaM_newvector(L, newsize, GCObject *); + tb = &G(L)->strt; + for (i=0; i<newsize; i++) newhash[i] = NULL; + /* rehash */ + for (i=0; i<tb->size; i++) { + GCObject *p = tb->hash[i]; + while (p) { /* for each node in the list */ + GCObject *next = p->gch.next; /* save next */ + unsigned int h = gco2ts(p)->hash; + int h1 = lmod(h, newsize); /* new position */ + lua_assert(cast_int(h%newsize) == lmod(h, newsize)); + p->gch.next = newhash[h1]; /* chain it */ + newhash[h1] = p; + p = next; + } + } + luaM_freearray(L, tb->hash, tb->size, TString *); + tb->size = newsize; + tb->hash = newhash; +} + + +static TString *newlstr (lua_State *L, const char *str, size_t l, + unsigned int h) { + TString *ts; + stringtable *tb; + if (l+1 > (MAX_SIZET - sizeof(TString))/sizeof(char)) + luaM_toobig(L); + ts = cast(TString *, luaM_malloc(L, (l+1)*sizeof(char)+sizeof(TString))); + ts->tsv.len = l; + ts->tsv.hash = h; + ts->tsv.marked = luaC_white(G(L)); + ts->tsv.tt = LUA_TSTRING; + ts->tsv.reserved = 0; + memcpy(ts+1, str, l*sizeof(char)); + ((char *)(ts+1))[l] = '\0'; /* ending 0 */ + tb = &G(L)->strt; + h = lmod(h, tb->size); + ts->tsv.next = tb->hash[h]; /* chain new entry */ + tb->hash[h] = obj2gco(ts); + tb->nuse++; + if (tb->nuse > cast(lu_int32, tb->size) && tb->size <= MAX_INT/2) + luaS_resize(L, tb->size*2); /* too crowded */ + return ts; +} + + +TString *luaS_newlstr (lua_State *L, const char *str, size_t l) { + GCObject *o; + unsigned int h = cast(unsigned int, l); /* seed */ + size_t step = 1; + size_t l1; + for (l1=l; l1>=step; 
l1-=step) /* compute hash */ + h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); + for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)]; + o != NULL; + o = o->gch.next) { + TString *ts = rawgco2ts(o); + if (ts->tsv.len == l && (memcmp(str, getstr(ts), l) == 0)) { + /* string may be dead */ + if (isdead(G(L), o)) changewhite(o); + return ts; + } + } + return newlstr(L, str, l, h); /* not found */ +} + + +Udata *luaS_newudata (lua_State *L, size_t s, Table *e) { + Udata *u; + if (s > MAX_SIZET - sizeof(Udata)) + luaM_toobig(L); + u = cast(Udata *, luaM_malloc(L, s + sizeof(Udata))); + u->uv.marked = luaC_white(G(L)); /* is not finalized */ + u->uv.tt = LUA_TUSERDATA; + u->uv.len = s; + u->uv.metatable = NULL; + u->uv.env = e; + /* chain it on udata list (after main thread) */ + u->uv.next = G(L)->mainthread->next; + G(L)->mainthread->next = obj2gco(u); + return u; +} + diff --git a/deps/lua/src/lstring.h b/deps/lua/src/lstring.h new file mode 100644 index 0000000..73a2ff8 --- /dev/null +++ b/deps/lua/src/lstring.h @@ -0,0 +1,31 @@ +/* +** $Id: lstring.h,v 1.43.1.1 2007/12/27 13:02:25 roberto Exp $ +** String table (keep all strings handled by Lua) +** See Copyright Notice in lua.h +*/ + +#ifndef lstring_h +#define lstring_h + + +#include "lgc.h" +#include "lobject.h" +#include "lstate.h" + + +#define sizestring(s) (sizeof(union TString)+((s)->len+1)*sizeof(char)) + +#define sizeudata(u) (sizeof(union Udata)+(u)->len) + +#define luaS_new(L, s) (luaS_newlstr(L, s, strlen(s))) +#define luaS_newliteral(L, s) (luaS_newlstr(L, "" s, \ + (sizeof(s)/sizeof(char))-1)) + +#define luaS_fix(s) l_setbit((s)->tsv.marked, FIXEDBIT) + +LUAI_FUNC void luaS_resize (lua_State *L, int newsize); +LUAI_FUNC Udata *luaS_newudata (lua_State *L, size_t s, Table *e); +LUAI_FUNC TString *luaS_newlstr (lua_State *L, const char *str, size_t l); + + +#endif + diff --git a/deps/lua/src/lstrlib.c b/deps/lua/src/lstrlib.c new file mode 100644 index 0000000..7a03489 --- /dev/null +++ b/deps/lua/src/lstrlib.c @@ -0,0 +1,871 @@ +/* +** $Id: lstrlib.c,v 1.132.1.5 2010/05/14 15:34:19 roberto Exp $ +** Standard library for string operations and pattern-matching +** See Copyright Notice in lua.h +*/ + + +#include <ctype.h> +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define lstrlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +/* macro to `unsign' a character */ +#define uchar(c) ((unsigned char)(c)) + + + +static int str_len (lua_State *L) { + size_t l; + luaL_checklstring(L, 1, &l); + lua_pushinteger(L, l); + return 1; +} + + +static ptrdiff_t posrelat (ptrdiff_t pos, size_t len) { + /* relative string position: negative means back from end */ + if (pos < 0) pos += (ptrdiff_t)len + 1; + return (pos >= 0) ? 
pos : 0; +} + + +static int str_sub (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + ptrdiff_t start = posrelat(luaL_checkinteger(L, 2), l); + ptrdiff_t end = posrelat(luaL_optinteger(L, 3, -1), l); + if (start < 1) start = 1; + if (end > (ptrdiff_t)l) end = (ptrdiff_t)l; + if (start <= end) + lua_pushlstring(L, s+start-1, end-start+1); + else lua_pushliteral(L, ""); + return 1; +} + + +static int str_reverse (lua_State *L) { + size_t l; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + luaL_buffinit(L, &b); + while (l--) luaL_addchar(&b, s[l]); + luaL_pushresult(&b); + return 1; +} + + +static int str_lower (lua_State *L) { + size_t l; + size_t i; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + luaL_buffinit(L, &b); + for (i=0; i<l; i++) + luaL_addchar(&b, tolower(uchar(s[i]))); + luaL_pushresult(&b); + return 1; +} + + +static int str_upper (lua_State *L) { + size_t l; + size_t i; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + luaL_buffinit(L, &b); + for (i=0; i<l; i++) + luaL_addchar(&b, toupper(uchar(s[i]))); + luaL_pushresult(&b); + return 1; +} + +static int str_rep (lua_State *L) { + size_t l; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + int n = luaL_checkint(L, 2); + luaL_buffinit(L, &b); + while (n-- > 0) + luaL_addlstring(&b, s, l); + luaL_pushresult(&b); + return 1; +} + + +static int str_byte (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + ptrdiff_t posi = posrelat(luaL_optinteger(L, 2, 1), l); + ptrdiff_t pose = posrelat(luaL_optinteger(L, 3, posi), l); + int n, i; + if (posi <= 0) posi = 1; + if ((size_t)pose > l) pose = l; + if (posi > pose) return 0; /* empty interval; return no values */ + n = (int)(pose - posi + 1); + if (posi + n <= pose) /* overflow? */ + luaL_error(L, "string slice too long"); + luaL_checkstack(L, n, "string slice too long"); + for (i=0; i<n; i++) + lua_pushinteger(L, uchar(s[posi+i-1])); + return n; +} + + +static int str_char (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + int i; + luaL_Buffer b; + luaL_buffinit(L, &b); + for (i=1; i<=n; i++) { + int c = luaL_checkint(L, i); + luaL_argcheck(L, uchar(c) == c, i, "invalid value"); + luaL_addchar(&b, uchar(c)); + } + luaL_pushresult(&b); + return 1; +} + + +static int writer (lua_State *L, const void* b, size_t size, void* B) { + (void)L; + luaL_addlstring((luaL_Buffer*) B, (const char *)b, size); + return 0; +} + + +static int str_dump (lua_State *L) { + luaL_Buffer b; + luaL_checktype(L, 1, LUA_TFUNCTION); + lua_settop(L, 1); + luaL_buffinit(L,&b); + if (lua_dump(L, writer, &b) != 0) + luaL_error(L, "unable to dump given function"); + luaL_pushresult(&b); + return 1; +} + + + +/* +** {====================================================== +** PATTERN MATCHING +** ======================================================= +*/ + + +#define CAP_UNFINISHED (-1) +#define CAP_POSITION (-2) + +typedef struct MatchState { + const char *src_init; /* init of source string */ + const char *src_end; /* end (`\0') of source string */ + lua_State *L; + int level; /* total number of captures (finished or unfinished) */ + struct { + const char *init; + ptrdiff_t len; + } capture[LUA_MAXCAPTURES]; +} MatchState; + + +#define L_ESC '%' +#define SPECIALS "^$*+?.([%-" + + +static int check_capture (MatchState *ms, int l) { + l -= '1'; + if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED) + return luaL_error(ms->L, "invalid capture index"); + return l; +} + + +static int capture_to_close (MatchState *ms) { + int level = ms->level; + for (level--; level>=0; level--) + if (ms->capture[level].len == CAP_UNFINISHED) return level; + return luaL_error(ms->L, "invalid pattern capture"); +} + + +static const char *classend (MatchState *ms, const char *p) { + switch (*p++) { + case L_ESC: { + if (*p == '\0') + luaL_error(ms->L, "malformed pattern (ends with " LUA_QL("%%") ")"); + return p+1; + } + case '[': { + if (*p == '^') p++; + do { /* look for a `]' */ + if (*p == '\0') + luaL_error(ms->L, "malformed pattern (missing " LUA_QL("]") ")"); + if (*(p++) == L_ESC && *p != '\0') + p++; /* skip escapes (e.g. `%]') */ + } while (*p != ']'); + return p+1; + } + default: { + return p; + } + } +} + + +static int match_class (int c, int cl) { + int res; + switch (tolower(cl)) { + case 'a' : res = isalpha(c); break; + case 'c' : res = iscntrl(c); break; + case 'd' : res = isdigit(c); break; + case 'l' : res = islower(c); break; + case 'p' : res = ispunct(c); break; + case 's' : res = isspace(c); break; + case 'u' : res = isupper(c); break; + case 'w' : res = isalnum(c); break; + case 'x' : res = isxdigit(c); break; + case 'z' : res = (c == 0); break; + default: return (cl == c); + } + return (islower(cl) ? 
res : !res); +} + + +static int matchbracketclass (int c, const char *p, const char *ec) { + int sig = 1; + if (*(p+1) == '^') { + sig = 0; + p++; /* skip the `^' */ + } + while (++p < ec) { + if (*p == L_ESC) { + p++; + if (match_class(c, uchar(*p))) + return sig; + } + else if ((*(p+1) == '-') && (p+2 < ec)) { + p+=2; + if (uchar(*(p-2)) <= c && c <= uchar(*p)) + return sig; + } + else if (uchar(*p) == c) return sig; + } + return !sig; +} + + +static int singlematch (int c, const char *p, const char *ep) { + switch (*p) { + case '.': return 1; /* matches any char */ + case L_ESC: return match_class(c, uchar(*(p+1))); + case '[': return matchbracketclass(c, p, ep-1); + default: return (uchar(*p) == c); + } +} + + +static const char *match (MatchState *ms, const char *s, const char *p); + + +static const char *matchbalance (MatchState *ms, const char *s, + const char *p) { + if (*p == 0 || *(p+1) == 0) + luaL_error(ms->L, "unbalanced pattern"); + if (*s != *p) return NULL; + else { + int b = *p; + int e = *(p+1); + int cont = 1; + while (++s < ms->src_end) { + if (*s == e) { + if (--cont == 0) return s+1; + } + else if (*s == b) cont++; + } + } + return NULL; /* string ends out of balance */ +} + + +static const char *max_expand (MatchState *ms, const char *s, + const char *p, const char *ep) { + ptrdiff_t i = 0; /* counts maximum expand for item */ + while ((s+i)src_end && singlematch(uchar(*(s+i)), p, ep)) + i++; + /* keeps trying to match with the maximum repetitions */ + while (i>=0) { + const char *res = match(ms, (s+i), ep+1); + if (res) return res; + i--; /* else didn't match; reduce 1 repetition to try again */ + } + return NULL; +} + + +static const char *min_expand (MatchState *ms, const char *s, + const char *p, const char *ep) { + for (;;) { + const char *res = match(ms, s, ep+1); + if (res != NULL) + return res; + else if (ssrc_end && singlematch(uchar(*s), p, ep)) + s++; /* try with one more repetition */ + else return NULL; + } +} + + +static const char *start_capture (MatchState *ms, const char *s, + const char *p, int what) { + const char *res; + int level = ms->level; + if (level >= LUA_MAXCAPTURES) luaL_error(ms->L, "too many captures"); + ms->capture[level].init = s; + ms->capture[level].len = what; + ms->level = level+1; + if ((res=match(ms, s, p)) == NULL) /* match failed? */ + ms->level--; /* undo capture */ + return res; +} + + +static const char *end_capture (MatchState *ms, const char *s, + const char *p) { + int l = capture_to_close(ms); + const char *res; + ms->capture[l].len = s - ms->capture[l].init; /* close capture */ + if ((res = match(ms, s, p)) == NULL) /* match failed? */ + ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ + return res; +} + + +static const char *match_capture (MatchState *ms, const char *s, int l) { + size_t len; + l = check_capture(ms, l); + len = ms->capture[l].len; + if ((size_t)(ms->src_end-s) >= len && + memcmp(ms->capture[l].init, s, len) == 0) + return s+len; + else return NULL; +} + + +static const char *match (MatchState *ms, const char *s, const char *p) { + init: /* using goto's to optimize tail recursion */ + switch (*p) { + case '(': { /* start capture */ + if (*(p+1) == ')') /* position capture? */ + return start_capture(ms, s, p+2, CAP_POSITION); + else + return start_capture(ms, s, p+1, CAP_UNFINISHED); + } + case ')': { /* end capture */ + return end_capture(ms, s, p+1); + } + case L_ESC: { + switch (*(p+1)) { + case 'b': { /* balanced string? 
*/ + s = matchbalance(ms, s, p+2); + if (s == NULL) return NULL; + p+=4; goto init; /* else return match(ms, s, p+4); */ + } + case 'f': { /* frontier? */ + const char *ep; char previous; + p += 2; + if (*p != '[') + luaL_error(ms->L, "missing " LUA_QL("[") " after " + LUA_QL("%%f") " in pattern"); + ep = classend(ms, p); /* points to what is next */ + previous = (s == ms->src_init) ? '\0' : *(s-1); + if (matchbracketclass(uchar(previous), p, ep-1) || + !matchbracketclass(uchar(*s), p, ep-1)) return NULL; + p=ep; goto init; /* else return match(ms, s, ep); */ + } + default: { + if (isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */ + s = match_capture(ms, s, uchar(*(p+1))); + if (s == NULL) return NULL; + p+=2; goto init; /* else return match(ms, s, p+2) */ + } + goto dflt; /* case default */ + } + } + } + case '\0': { /* end of pattern */ + return s; /* match succeeded */ + } + case '$': { + if (*(p+1) == '\0') /* is the `$' the last char in pattern? */ + return (s == ms->src_end) ? s : NULL; /* check end of string */ + else goto dflt; + } + default: dflt: { /* it is a pattern item */ + const char *ep = classend(ms, p); /* points to what is next */ + int m = ssrc_end && singlematch(uchar(*s), p, ep); + switch (*ep) { + case '?': { /* optional */ + const char *res; + if (m && ((res=match(ms, s+1, ep+1)) != NULL)) + return res; + p=ep+1; goto init; /* else return match(ms, s, ep+1); */ + } + case '*': { /* 0 or more repetitions */ + return max_expand(ms, s, p, ep); + } + case '+': { /* 1 or more repetitions */ + return (m ? max_expand(ms, s+1, p, ep) : NULL); + } + case '-': { /* 0 or more repetitions (minimum) */ + return min_expand(ms, s, p, ep); + } + default: { + if (!m) return NULL; + s++; p=ep; goto init; /* else return match(ms, s+1, ep); */ + } + } + } + } +} + + + +static const char *lmemfind (const char *s1, size_t l1, + const char *s2, size_t l2) { + if (l2 == 0) return s1; /* empty strings are everywhere */ + else if (l2 > l1) return NULL; /* avoids a negative `l1' */ + else { + const char *init; /* to search for a `*s2' inside `s1' */ + l2--; /* 1st char will be checked by `memchr' */ + l1 = l1-l2; /* `s2' cannot be found after that */ + while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) { + init++; /* 1st char is already checked */ + if (memcmp(init, s2+1, l2) == 0) + return init-1; + else { /* correct `l1' and `s1' to try again */ + l1 -= init-s1; + s1 = init; + } + } + return NULL; /* not found */ + } +} + + +static void push_onecapture (MatchState *ms, int i, const char *s, + const char *e) { + if (i >= ms->level) { + if (i == 0) /* ms->level == 0, too */ + lua_pushlstring(ms->L, s, e - s); /* add whole match */ + else + luaL_error(ms->L, "invalid capture index"); + } + else { + ptrdiff_t l = ms->capture[i].len; + if (l == CAP_UNFINISHED) luaL_error(ms->L, "unfinished capture"); + if (l == CAP_POSITION) + lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); + else + lua_pushlstring(ms->L, ms->capture[i].init, l); + } +} + + +static int push_captures (MatchState *ms, const char *s, const char *e) { + int i; + int nlevels = (ms->level == 0 && s) ? 
1 : ms->level; + luaL_checkstack(ms->L, nlevels, "too many captures"); + for (i = 0; i < nlevels; i++) + push_onecapture(ms, i, s, e); + return nlevels; /* number of strings pushed */ +} + + +static int str_find_aux (lua_State *L, int find) { + size_t l1, l2; + const char *s = luaL_checklstring(L, 1, &l1); + const char *p = luaL_checklstring(L, 2, &l2); + ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1; + if (init < 0) init = 0; + else if ((size_t)(init) > l1) init = (ptrdiff_t)l1; + if (find && (lua_toboolean(L, 4) || /* explicit request? */ + strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */ + /* do a plain search */ + const char *s2 = lmemfind(s+init, l1-init, p, l2); + if (s2) { + lua_pushinteger(L, s2-s+1); + lua_pushinteger(L, s2-s+l2); + return 2; + } + } + else { + MatchState ms; + int anchor = (*p == '^') ? (p++, 1) : 0; + const char *s1=s+init; + ms.L = L; + ms.src_init = s; + ms.src_end = s+l1; + do { + const char *res; + ms.level = 0; + if ((res=match(&ms, s1, p)) != NULL) { + if (find) { + lua_pushinteger(L, s1-s+1); /* start */ + lua_pushinteger(L, res-s); /* end */ + return push_captures(&ms, NULL, 0) + 2; + } + else + return push_captures(&ms, s1, res); + } + } while (s1++ < ms.src_end && !anchor); + } + lua_pushnil(L); /* not found */ + return 1; +} + + +static int str_find (lua_State *L) { + return str_find_aux(L, 1); +} + + +static int str_match (lua_State *L) { + return str_find_aux(L, 0); +} + + +static int gmatch_aux (lua_State *L) { + MatchState ms; + size_t ls; + const char *s = lua_tolstring(L, lua_upvalueindex(1), &ls); + const char *p = lua_tostring(L, lua_upvalueindex(2)); + const char *src; + ms.L = L; + ms.src_init = s; + ms.src_end = s+ls; + for (src = s + (size_t)lua_tointeger(L, lua_upvalueindex(3)); + src <= ms.src_end; + src++) { + const char *e; + ms.level = 0; + if ((e = match(&ms, src, p)) != NULL) { + lua_Integer newstart = e-s; + if (e == src) newstart++; /* empty match? go at least one position */ + lua_pushinteger(L, newstart); + lua_replace(L, lua_upvalueindex(3)); + return push_captures(&ms, src, e); + } + } + return 0; /* not found */ +} + + +static int gmatch (lua_State *L) { + luaL_checkstring(L, 1); + luaL_checkstring(L, 2); + lua_settop(L, 2); + lua_pushinteger(L, 0); + lua_pushcclosure(L, gmatch_aux, 3); + return 1; +} + + +static int gfind_nodef (lua_State *L) { + return luaL_error(L, LUA_QL("string.gfind") " was renamed to " + LUA_QL("string.gmatch")); +} + + +static void add_s (MatchState *ms, luaL_Buffer *b, const char *s, + const char *e) { + size_t l, i; + const char *news = lua_tolstring(ms->L, 3, &l); + for (i = 0; i < l; i++) { + if (news[i] != L_ESC) + luaL_addchar(b, news[i]); + else { + i++; /* skip ESC */ + if (!isdigit(uchar(news[i]))) + luaL_addchar(b, news[i]); + else if (news[i] == '0') + luaL_addlstring(b, s, e - s); + else { + push_onecapture(ms, news[i] - '1', s, e); + luaL_addvalue(b); /* add capture to accumulated result */ + } + } + } +} + + +static void add_value (MatchState *ms, luaL_Buffer *b, const char *s, + const char *e) { + lua_State *L = ms->L; + switch (lua_type(L, 3)) { + case LUA_TNUMBER: + case LUA_TSTRING: { + add_s(ms, b, s, e); + return; + } + case LUA_TFUNCTION: { + int n; + lua_pushvalue(L, 3); + n = push_captures(ms, s, e); + lua_call(L, n, 1); + break; + } + case LUA_TTABLE: { + push_onecapture(ms, 0, s, e); + lua_gettable(L, 3); + break; + } + } + if (!lua_toboolean(L, -1)) { /* nil or false? 
*/ + lua_pop(L, 1); + lua_pushlstring(L, s, e - s); /* keep original text */ + } + else if (!lua_isstring(L, -1)) + luaL_error(L, "invalid replacement value (a %s)", luaL_typename(L, -1)); + luaL_addvalue(b); /* add result to accumulator */ +} + + +static int str_gsub (lua_State *L) { + size_t srcl; + const char *src = luaL_checklstring(L, 1, &srcl); + const char *p = luaL_checkstring(L, 2); + int tr = lua_type(L, 3); + int max_s = luaL_optint(L, 4, srcl+1); + int anchor = (*p == '^') ? (p++, 1) : 0; + int n = 0; + MatchState ms; + luaL_Buffer b; + luaL_argcheck(L, tr == LUA_TNUMBER || tr == LUA_TSTRING || + tr == LUA_TFUNCTION || tr == LUA_TTABLE, 3, + "string/function/table expected"); + luaL_buffinit(L, &b); + ms.L = L; + ms.src_init = src; + ms.src_end = src+srcl; + while (n < max_s) { + const char *e; + ms.level = 0; + e = match(&ms, src, p); + if (e) { + n++; + add_value(&ms, &b, src, e); + } + if (e && e>src) /* non empty match? */ + src = e; /* skip it */ + else if (src < ms.src_end) + luaL_addchar(&b, *src++); + else break; + if (anchor) break; + } + luaL_addlstring(&b, src, ms.src_end-src); + luaL_pushresult(&b); + lua_pushinteger(L, n); /* number of substitutions */ + return 2; +} + +/* }====================================================== */ + + +/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */ +#define MAX_ITEM 512 +/* valid flags in a format specification */ +#define FLAGS "-+ #0" +/* +** maximum size of each format specification (such as '%-099.99d') +** (+10 accounts for %99.99x plus margin of error) +*/ +#define MAX_FORMAT (sizeof(FLAGS) + sizeof(LUA_INTFRMLEN) + 10) + + +static void addquoted (lua_State *L, luaL_Buffer *b, int arg) { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + luaL_addchar(b, '"'); + while (l--) { + switch (*s) { + case '"': case '\\': case '\n': { + luaL_addchar(b, '\\'); + luaL_addchar(b, *s); + break; + } + case '\r': { + luaL_addlstring(b, "\\r", 2); + break; + } + case '\0': { + luaL_addlstring(b, "\\000", 4); + break; + } + default: { + luaL_addchar(b, *s); + break; + } + } + s++; + } + luaL_addchar(b, '"'); +} + +static const char *scanformat (lua_State *L, const char *strfrmt, char *form) { + const char *p = strfrmt; + while (*p != '\0' && strchr(FLAGS, *p) != NULL) p++; /* skip flags */ + if ((size_t)(p - strfrmt) >= sizeof(FLAGS)) + luaL_error(L, "invalid format (repeated flags)"); + if (isdigit(uchar(*p))) p++; /* skip width */ + if (isdigit(uchar(*p))) p++; /* (2 digits at most) */ + if (*p == '.') { + p++; + if (isdigit(uchar(*p))) p++; /* skip precision */ + if (isdigit(uchar(*p))) p++; /* (2 digits at most) */ + } + if (isdigit(uchar(*p))) + luaL_error(L, "invalid format (width or precision too long)"); + *(form++) = '%'; + strncpy(form, strfrmt, p - strfrmt + 1); + form += p - strfrmt + 1; + *form = '\0'; + return p; +} + + +static void addintlen (char *form) { + size_t l = strlen(form); + char spec = form[l - 1]; + strcpy(form + l - 1, LUA_INTFRMLEN); + form[l + sizeof(LUA_INTFRMLEN) - 2] = spec; + form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0'; +} + + +static int str_format (lua_State *L) { + int top = lua_gettop(L); + int arg = 1; + size_t sfl; + const char *strfrmt = luaL_checklstring(L, arg, &sfl); + const char *strfrmt_end = strfrmt+sfl; + luaL_Buffer b; + luaL_buffinit(L, &b); + while (strfrmt < strfrmt_end) { + if (*strfrmt != L_ESC) + luaL_addchar(&b, *strfrmt++); + else if (*++strfrmt == L_ESC) + luaL_addchar(&b, *strfrmt++); /* %% */ + else { /* format item */ + char form[MAX_FORMAT]; 
/* to store the format (`%...') */ + char buff[MAX_ITEM]; /* to store the formatted item */ + if (++arg > top) + luaL_argerror(L, arg, "no value"); + strfrmt = scanformat(L, strfrmt, form); + switch (*strfrmt++) { + case 'c': { + sprintf(buff, form, (int)luaL_checknumber(L, arg)); + break; + } + case 'd': case 'i': { + addintlen(form); + sprintf(buff, form, (LUA_INTFRM_T)luaL_checknumber(L, arg)); + break; + } + case 'o': case 'u': case 'x': case 'X': { + addintlen(form); + sprintf(buff, form, (unsigned LUA_INTFRM_T)luaL_checknumber(L, arg)); + break; + } + case 'e': case 'E': case 'f': + case 'g': case 'G': { + sprintf(buff, form, (double)luaL_checknumber(L, arg)); + break; + } + case 'q': { + addquoted(L, &b, arg); + continue; /* skip the 'addsize' at the end */ + } + case 's': { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + if (!strchr(form, '.') && l >= 100) { + /* no precision and string is too long to be formatted; + keep original string */ + lua_pushvalue(L, arg); + luaL_addvalue(&b); + continue; /* skip the `addsize' at the end */ + } + else { + sprintf(buff, form, s); + break; + } + } + default: { /* also treat cases `pnLlh' */ + return luaL_error(L, "invalid option " LUA_QL("%%%c") " to " + LUA_QL("format"), *(strfrmt - 1)); + } + } + luaL_addlstring(&b, buff, strlen(buff)); + } + } + luaL_pushresult(&b); + return 1; +} + + +static const luaL_Reg strlib[] = { + {"byte", str_byte}, + {"char", str_char}, + {"dump", str_dump}, + {"find", str_find}, + {"format", str_format}, + {"gfind", gfind_nodef}, + {"gmatch", gmatch}, + {"gsub", str_gsub}, + {"len", str_len}, + {"lower", str_lower}, + {"match", str_match}, + {"rep", str_rep}, + {"reverse", str_reverse}, + {"sub", str_sub}, + {"upper", str_upper}, + {NULL, NULL} +}; + + +static void createmetatable (lua_State *L) { + lua_createtable(L, 0, 1); /* create metatable for strings */ + lua_pushliteral(L, ""); /* dummy string */ + lua_pushvalue(L, -2); + lua_setmetatable(L, -2); /* set string metatable */ + lua_pop(L, 1); /* pop dummy string */ + lua_pushvalue(L, -2); /* string library... */ + lua_setfield(L, -2, "__index"); /* ...is the __index metamethod */ + lua_pop(L, 1); /* pop metatable */ +} + + +/* +** Open string library +*/ +LUALIB_API int luaopen_string (lua_State *L) { + luaL_register(L, LUA_STRLIBNAME, strlib); +#if defined(LUA_COMPAT_GFIND) + lua_getfield(L, -1, "gmatch"); + lua_setfield(L, -2, "gfind"); +#endif + createmetatable(L); + return 1; +} + diff --git a/deps/lua/src/ltable.c b/deps/lua/src/ltable.c new file mode 100644 index 0000000..f75fe19 --- /dev/null +++ b/deps/lua/src/ltable.c @@ -0,0 +1,589 @@ +/* +** $Id: ltable.c,v 2.32.1.2 2007/12/28 15:32:23 roberto Exp $ +** Lua tables (hash) +** See Copyright Notice in lua.h +*/ + + +/* +** Implementation of tables (aka arrays, objects, or hash tables). +** Tables keep its elements in two parts: an array part and a hash part. +** Non-negative integer keys are all candidates to be kept in the array +** part. The actual size of the array is the largest `n' such that at +** least half the slots between 0 and n are in use. +** Hash uses a mix of chained scatter table with Brent's variation. +** A main invariant of these tables is that, if an element is not +** in its main position (i.e. the `original' position that its hash gives +** to it), then the colliding element is in its own main position. +** Hence even when the load factor reaches 100%, performance remains good. 
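+** As a small, hypothetical illustration of that rule (the key values
+** below are chosen here for clarity, they are not from the original
+** comment): with integer keys 1, 2 and 3 plus a string key "x"
+** present, computesizes() below would settle on an array part of
+** size 4 (3 of its 4 slots in use, i.e. more than half), while the
+** string key "x" always lives in the hash part.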
+*/ + +#include +#include + +#define ltable_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "ltable.h" + + +/* +** max size of array part is 2^MAXBITS +*/ +#if LUAI_BITSINT > 26 +#define MAXBITS 26 +#else +#define MAXBITS (LUAI_BITSINT-2) +#endif + +#define MAXASIZE (1 << MAXBITS) + + +#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t)))) + +#define hashstr(t,str) hashpow2(t, (str)->tsv.hash) +#define hashboolean(t,p) hashpow2(t, p) + + +/* +** for some types, it is better to avoid modulus by power of 2, as +** they tend to have many 2 factors. +*/ +#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1)))) + + +#define hashpointer(t,p) hashmod(t, IntPoint(p)) + + +/* +** number of ints inside a lua_Number +*/ +#define numints cast_int(sizeof(lua_Number)/sizeof(int)) + + + +#define dummynode (&dummynode_) + +static const Node dummynode_ = { + {{NULL}, LUA_TNIL}, /* value */ + {{{NULL}, LUA_TNIL, NULL}} /* key */ +}; + + +/* +** hash for lua_Numbers +*/ +static Node *hashnum (const Table *t, lua_Number n) { + unsigned int a[numints]; + int i; + if (luai_numeq(n, 0)) /* avoid problems with -0 */ + return gnode(t, 0); + memcpy(a, &n, sizeof(a)); + for (i = 1; i < numints; i++) a[0] += a[i]; + return hashmod(t, a[0]); +} + + + +/* +** returns the `main' position of an element in a table (that is, the index +** of its hash value) +*/ +static Node *mainposition (const Table *t, const TValue *key) { + switch (ttype(key)) { + case LUA_TNUMBER: + return hashnum(t, nvalue(key)); + case LUA_TSTRING: + return hashstr(t, rawtsvalue(key)); + case LUA_TBOOLEAN: + return hashboolean(t, bvalue(key)); + case LUA_TLIGHTUSERDATA: + return hashpointer(t, pvalue(key)); + default: + return hashpointer(t, gcvalue(key)); + } +} + + +/* +** returns the index for `key' if `key' is an appropriate key to live in +** the array part of the table, -1 otherwise. +*/ +static int arrayindex (const TValue *key) { + if (ttisnumber(key)) { + lua_Number n = nvalue(key); + int k; + lua_number2int(k, n); + if (luai_numeq(cast_num(k), n)) + return k; + } + return -1; /* `key' did not match some condition */ +} + + +/* +** returns the index of a `key' for table traversals. First goes all +** elements in the array part, then elements in the hash part. The +** beginning of a traversal is signalled by -1. +*/ +static int findindex (lua_State *L, Table *t, StkId key) { + int i; + if (ttisnil(key)) return -1; /* first iteration */ + i = arrayindex(key); + if (0 < i && i <= t->sizearray) /* is `key' inside array part? */ + return i-1; /* yes; that's the index (corrected to C) */ + else { + Node *n = mainposition(t, key); + do { /* check whether `key' is somewhere in the chain */ + /* key may be dead already, but it is ok to use it in `next' */ + if (luaO_rawequalObj(key2tval(n), key) || + (ttype(gkey(n)) == LUA_TDEADKEY && iscollectable(key) && + gcvalue(gkey(n)) == gcvalue(key))) { + i = cast_int(n - gnode(t, 0)); /* key index in hash table */ + /* hash elements are numbered after array ones */ + return i + t->sizearray; + } + else n = gnext(n); + } while (n); + luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */ + return 0; /* to avoid warnings */ + } +} + + +int luaH_next (lua_State *L, Table *t, StkId key) { + int i = findindex(L, t, key); /* find original element */ + for (i++; i < t->sizearray; i++) { /* try first array part */ + if (!ttisnil(&t->array[i])) { /* a non-nil value? 
*/ + setnvalue(key, cast_num(i+1)); + setobj2s(L, key+1, &t->array[i]); + return 1; + } + } + for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */ + if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? */ + setobj2s(L, key, key2tval(gnode(t, i))); + setobj2s(L, key+1, gval(gnode(t, i))); + return 1; + } + } + return 0; /* no more elements */ +} + + +/* +** {============================================================= +** Rehash +** ============================================================== +*/ + + +static int computesizes (int nums[], int *narray) { + int i; + int twotoi; /* 2^i */ + int a = 0; /* number of elements smaller than 2^i */ + int na = 0; /* number of elements to go to array part */ + int n = 0; /* optimal size for array part */ + for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) { + if (nums[i] > 0) { + a += nums[i]; + if (a > twotoi/2) { /* more than half elements present? */ + n = twotoi; /* optimal size (till now) */ + na = a; /* all elements smaller than n will go to array part */ + } + } + if (a == *narray) break; /* all elements already counted */ + } + *narray = n; + lua_assert(*narray/2 <= na && na <= *narray); + return na; +} + + +static int countint (const TValue *key, int *nums) { + int k = arrayindex(key); + if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */ + nums[ceillog2(k)]++; /* count as such */ + return 1; + } + else + return 0; +} + + +static int numusearray (const Table *t, int *nums) { + int lg; + int ttlg; /* 2^lg */ + int ause = 0; /* summation of `nums' */ + int i = 1; /* count to traverse all array keys */ + for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */ + int lc = 0; /* counter */ + int lim = ttlg; + if (lim > t->sizearray) { + lim = t->sizearray; /* adjust upper limit */ + if (i > lim) + break; /* no more elements to count */ + } + /* count elements in range (2^(lg-1), 2^lg] */ + for (; i <= lim; i++) { + if (!ttisnil(&t->array[i-1])) + lc++; + } + nums[lg] += lc; + ause += lc; + } + return ause; +} + + +static int numusehash (const Table *t, int *nums, int *pnasize) { + int totaluse = 0; /* total number of elements */ + int ause = 0; /* summation of `nums' */ + int i = sizenode(t); + while (i--) { + Node *n = &t->node[i]; + if (!ttisnil(gval(n))) { + ause += countint(key2tval(n), nums); + totaluse++; + } + } + *pnasize += ause; + return totaluse; +} + + +static void setarrayvector (lua_State *L, Table *t, int size) { + int i; + luaM_reallocvector(L, t->array, t->sizearray, size, TValue); + for (i=t->sizearray; iarray[i]); + t->sizearray = size; +} + + +static void setnodevector (lua_State *L, Table *t, int size) { + int lsize; + if (size == 0) { /* no elements to hash part? */ + t->node = cast(Node *, dummynode); /* use common `dummynode' */ + lsize = 0; + } + else { + int i; + lsize = ceillog2(size); + if (lsize > MAXBITS) + luaG_runerror(L, "table overflow"); + size = twoto(lsize); + t->node = luaM_newvector(L, size, Node); + for (i=0; ilsizenode = cast_byte(lsize); + t->lastfree = gnode(t, size); /* all positions are free */ +} + + +static void resize (lua_State *L, Table *t, int nasize, int nhsize) { + int i; + int oldasize = t->sizearray; + int oldhsize = t->lsizenode; + Node *nold = t->node; /* save old hash ... */ + if (nasize > oldasize) /* array part must grow? */ + setarrayvector(L, t, nasize); + /* create new hash part with appropriate size */ + setnodevector(L, t, nhsize); + if (nasize < oldasize) { /* array part must shrink? 
*/ + t->sizearray = nasize; + /* re-insert elements from vanishing slice */ + for (i=nasize; iarray[i])) + setobjt2t(L, luaH_setnum(L, t, i+1), &t->array[i]); + } + /* shrink array */ + luaM_reallocvector(L, t->array, oldasize, nasize, TValue); + } + /* re-insert elements from hash part */ + for (i = twoto(oldhsize) - 1; i >= 0; i--) { + Node *old = nold+i; + if (!ttisnil(gval(old))) + setobjt2t(L, luaH_set(L, t, key2tval(old)), gval(old)); + } + if (nold != dummynode) + luaM_freearray(L, nold, twoto(oldhsize), Node); /* free old array */ +} + + +void luaH_resizearray (lua_State *L, Table *t, int nasize) { + int nsize = (t->node == dummynode) ? 0 : sizenode(t); + resize(L, t, nasize, nsize); +} + + +static void rehash (lua_State *L, Table *t, const TValue *ek) { + int nasize, na; + int nums[MAXBITS+1]; /* nums[i] = number of keys between 2^(i-1) and 2^i */ + int i; + int totaluse; + for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */ + nasize = numusearray(t, nums); /* count keys in array part */ + totaluse = nasize; /* all those keys are integer keys */ + totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */ + /* count extra key */ + nasize += countint(ek, nums); + totaluse++; + /* compute new size for array part */ + na = computesizes(nums, &nasize); + /* resize the table to new computed sizes */ + resize(L, t, nasize, totaluse - na); +} + + + +/* +** }============================================================= +*/ + + +Table *luaH_new (lua_State *L, int narray, int nhash) { + Table *t = luaM_new(L, Table); + luaC_link(L, obj2gco(t), LUA_TTABLE); + t->metatable = NULL; + t->flags = cast_byte(~0); + /* temporary values (kept only if some malloc fails) */ + t->array = NULL; + t->sizearray = 0; + t->lsizenode = 0; + t->readonly = 0; + t->node = cast(Node *, dummynode); + setarrayvector(L, t, narray); + setnodevector(L, t, nhash); + return t; +} + + +void luaH_free (lua_State *L, Table *t) { + if (t->node != dummynode) + luaM_freearray(L, t->node, sizenode(t), Node); + luaM_freearray(L, t->array, t->sizearray, TValue); + luaM_free(L, t); +} + + +static Node *getfreepos (Table *t) { + while (t->lastfree-- > t->node) { + if (ttisnil(gkey(t->lastfree))) + return t->lastfree; + } + return NULL; /* could not find a free place */ +} + + + +/* +** inserts a new key into a hash table; first, check whether key's main +** position is free. If not, check whether colliding node is in its main +** position or not: if it is not, move colliding node to an empty place and +** put new key in its main position; otherwise (colliding node is in its main +** position), new key goes to an empty position. +*/ +static TValue *newkey (lua_State *L, Table *t, const TValue *key) { + Node *mp = mainposition(t, key); + if (!ttisnil(gval(mp)) || mp == dummynode) { + Node *othern; + Node *n = getfreepos(t); /* get a free place */ + if (n == NULL) { /* cannot find a free place? */ + rehash(L, t, key); /* grow table */ + return luaH_set(L, t, key); /* re-insert key into grown table */ + } + lua_assert(n != dummynode); + othern = mainposition(t, key2tval(mp)); + if (othern != mp) { /* is colliding node out of its main position? */ + /* yes; move colliding node into free position */ + while (gnext(othern) != mp) othern = gnext(othern); /* find previous */ + gnext(othern) = n; /* redo the chain with `n' in place of `mp' */ + *n = *mp; /* copy colliding node into free pos. 
(mp->next also goes) */ + gnext(mp) = NULL; /* now `mp' is free */ + setnilvalue(gval(mp)); + } + else { /* colliding node is in its own main position */ + /* new node will go into free position */ + gnext(n) = gnext(mp); /* chain new position */ + gnext(mp) = n; + mp = n; + } + } + gkey(mp)->value = key->value; gkey(mp)->tt = key->tt; + luaC_barriert(L, t, key); + lua_assert(ttisnil(gval(mp))); + return gval(mp); +} + + +/* +** search function for integers +*/ +const TValue *luaH_getnum (Table *t, int key) { + /* (1 <= key && key <= t->sizearray) */ + if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray)) + return &t->array[key-1]; + else { + lua_Number nk = cast_num(key); + Node *n = hashnum(t, nk); + do { /* check whether `key' is somewhere in the chain */ + if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk)) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; + } +} + + +/* +** search function for strings +*/ +const TValue *luaH_getstr (Table *t, TString *key) { + Node *n = hashstr(t, key); + do { /* check whether `key' is somewhere in the chain */ + if (ttisstring(gkey(n)) && rawtsvalue(gkey(n)) == key) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; +} + + +/* +** main search function +*/ +const TValue *luaH_get (Table *t, const TValue *key) { + switch (ttype(key)) { + case LUA_TNIL: return luaO_nilobject; + case LUA_TSTRING: return luaH_getstr(t, rawtsvalue(key)); + case LUA_TNUMBER: { + int k; + lua_Number n = nvalue(key); + lua_number2int(k, n); + if (luai_numeq(cast_num(k), nvalue(key))) /* index is int? */ + return luaH_getnum(t, k); /* use specialized version */ + /* else go through */ + } + default: { + Node *n = mainposition(t, key); + do { /* check whether `key' is somewhere in the chain */ + if (luaO_rawequalObj(key2tval(n), key)) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; + } + } +} + + +TValue *luaH_set (lua_State *L, Table *t, const TValue *key) { + const TValue *p = luaH_get(t, key); + t->flags = 0; + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + if (ttisnil(key)) luaG_runerror(L, "table index is nil"); + else if (ttisnumber(key) && luai_numisnan(nvalue(key))) + luaG_runerror(L, "table index is NaN"); + return newkey(L, t, key); + } +} + + +TValue *luaH_setnum (lua_State *L, Table *t, int key) { + const TValue *p = luaH_getnum(t, key); + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + TValue k; + setnvalue(&k, cast_num(key)); + return newkey(L, t, &k); + } +} + + +TValue *luaH_setstr (lua_State *L, Table *t, TString *key) { + const TValue *p = luaH_getstr(t, key); + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + TValue k; + setsvalue(L, &k, key); + return newkey(L, t, &k); + } +} + + +static int unbound_search (Table *t, unsigned int j) { + unsigned int i = j; /* i is zero or a present index */ + j++; + /* find `i' and `j' such that i is present and j is not */ + while (!ttisnil(luaH_getnum(t, j))) { + i = j; + j *= 2; + if (j > cast(unsigned int, MAX_INT)) { /* overflow? */ + /* table was built with bad purposes: resort to linear search */ + i = 1; + while (!ttisnil(luaH_getnum(t, i))) i++; + return i - 1; + } + } + /* now do a binary search between them */ + while (j - i > 1) { + unsigned int m = (i+j)/2; + if (ttisnil(luaH_getnum(t, m))) j = m; + else i = m; + } + return i; +} + + +/* +** Try to find a boundary in table `t'. 
A `boundary' is an integer index +** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil). +*/ +int luaH_getn (Table *t) { + unsigned int j = t->sizearray; + if (j > 0 && ttisnil(&t->array[j - 1])) { + /* there is a boundary in the array part: (binary) search for it */ + unsigned int i = 0; + while (j - i > 1) { + unsigned int m = (i+j)/2; + if (ttisnil(&t->array[m - 1])) j = m; + else i = m; + } + return i; + } + /* else must find a boundary in hash part */ + else if (t->node == dummynode) /* hash part is empty? */ + return j; /* that is easy... */ + else return unbound_search(t, j); +} + + + +#if defined(LUA_DEBUG) + +Node *luaH_mainposition (const Table *t, const TValue *key) { + return mainposition(t, key); +} + +int luaH_isdummy (Node *n) { return n == dummynode; } + +#endif diff --git a/deps/lua/src/ltable.h b/deps/lua/src/ltable.h new file mode 100644 index 0000000..f5b9d5e --- /dev/null +++ b/deps/lua/src/ltable.h @@ -0,0 +1,40 @@ +/* +** $Id: ltable.h,v 2.10.1.1 2007/12/27 13:02:25 roberto Exp $ +** Lua tables (hash) +** See Copyright Notice in lua.h +*/ + +#ifndef ltable_h +#define ltable_h + +#include "lobject.h" + + +#define gnode(t,i) (&(t)->node[i]) +#define gkey(n) (&(n)->i_key.nk) +#define gval(n) (&(n)->i_val) +#define gnext(n) ((n)->i_key.nk.next) + +#define key2tval(n) (&(n)->i_key.tvk) + + +LUAI_FUNC const TValue *luaH_getnum (Table *t, int key); +LUAI_FUNC TValue *luaH_setnum (lua_State *L, Table *t, int key); +LUAI_FUNC const TValue *luaH_getstr (Table *t, TString *key); +LUAI_FUNC TValue *luaH_setstr (lua_State *L, Table *t, TString *key); +LUAI_FUNC const TValue *luaH_get (Table *t, const TValue *key); +LUAI_FUNC TValue *luaH_set (lua_State *L, Table *t, const TValue *key); +LUAI_FUNC Table *luaH_new (lua_State *L, int narray, int lnhash); +LUAI_FUNC void luaH_resizearray (lua_State *L, Table *t, int nasize); +LUAI_FUNC void luaH_free (lua_State *L, Table *t); +LUAI_FUNC int luaH_next (lua_State *L, Table *t, StkId key); +LUAI_FUNC int luaH_getn (Table *t); + + +#if defined(LUA_DEBUG) +LUAI_FUNC Node *luaH_mainposition (const Table *t, const TValue *key); +LUAI_FUNC int luaH_isdummy (Node *n); +#endif + + +#endif diff --git a/deps/lua/src/ltablib.c b/deps/lua/src/ltablib.c new file mode 100644 index 0000000..0bdac7f --- /dev/null +++ b/deps/lua/src/ltablib.c @@ -0,0 +1,287 @@ +/* +** $Id: ltablib.c,v 1.38.1.3 2008/02/14 16:46:58 roberto Exp $ +** Library for Table Manipulation +** See Copyright Notice in lua.h +*/ + + +#include + +#define ltablib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +#define aux_getn(L,n) (luaL_checktype(L, n, LUA_TTABLE), luaL_getn(L, n)) + + +static int foreachi (lua_State *L) { + int i; + int n = aux_getn(L, 1); + luaL_checktype(L, 2, LUA_TFUNCTION); + for (i=1; i <= n; i++) { + lua_pushvalue(L, 2); /* function */ + lua_pushinteger(L, i); /* 1st argument */ + lua_rawgeti(L, 1, i); /* 2nd argument */ + lua_call(L, 2, 1); + if (!lua_isnil(L, -1)) + return 1; + lua_pop(L, 1); /* remove nil result */ + } + return 0; +} + + +static int foreach (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checktype(L, 2, LUA_TFUNCTION); + lua_pushnil(L); /* first key */ + while (lua_next(L, 1)) { + lua_pushvalue(L, 2); /* function */ + lua_pushvalue(L, -3); /* key */ + lua_pushvalue(L, -3); /* value */ + lua_call(L, 2, 1); + if (!lua_isnil(L, -1)) + return 1; + lua_pop(L, 2); /* remove value and result */ + } + return 0; +} + + +static int maxn (lua_State *L) { + lua_Number max = 0; + 
luaL_checktype(L, 1, LUA_TTABLE); + lua_pushnil(L); /* first key */ + while (lua_next(L, 1)) { + lua_pop(L, 1); /* remove value */ + if (lua_type(L, -1) == LUA_TNUMBER) { + lua_Number v = lua_tonumber(L, -1); + if (v > max) max = v; + } + } + lua_pushnumber(L, max); + return 1; +} + + +static int getn (lua_State *L) { + lua_pushinteger(L, aux_getn(L, 1)); + return 1; +} + + +static int setn (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); +#ifndef luaL_setn + luaL_setn(L, 1, luaL_checkint(L, 2)); +#else + luaL_error(L, LUA_QL("setn") " is obsolete"); +#endif + lua_pushvalue(L, 1); + return 1; +} + + +static int tinsert (lua_State *L) { + int e = aux_getn(L, 1) + 1; /* first empty element */ + int pos; /* where to insert new element */ + switch (lua_gettop(L)) { + case 2: { /* called with only 2 arguments */ + pos = e; /* insert new element at the end */ + break; + } + case 3: { + int i; + pos = luaL_checkint(L, 2); /* 2nd argument is the position */ + if (pos > e) e = pos; /* `grow' array if necessary */ + for (i = e; i > pos; i--) { /* move up elements */ + lua_rawgeti(L, 1, i-1); + lua_rawseti(L, 1, i); /* t[i] = t[i-1] */ + } + break; + } + default: { + return luaL_error(L, "wrong number of arguments to " LUA_QL("insert")); + } + } + luaL_setn(L, 1, e); /* new size */ + lua_rawseti(L, 1, pos); /* t[pos] = v */ + return 0; +} + + +static int tremove (lua_State *L) { + int e = aux_getn(L, 1); + int pos = luaL_optint(L, 2, e); + if (!(1 <= pos && pos <= e)) /* position is outside bounds? */ + return 0; /* nothing to remove */ + luaL_setn(L, 1, e - 1); /* t.n = n-1 */ + lua_rawgeti(L, 1, pos); /* result = t[pos] */ + for ( ;pos= P */ + while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) { + if (i>u) luaL_error(L, "invalid order function for sorting"); + lua_pop(L, 1); /* remove a[i] */ + } + /* repeat --j until a[j] <= P */ + while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) { + if (j + +#define ltm_c +#define LUA_CORE + +#include "lua.h" + +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + + +const char *const luaT_typenames[] = { + "nil", "boolean", "userdata", "number", + "string", "table", "function", "userdata", "thread", + "proto", "upval" +}; + + +void luaT_init (lua_State *L) { + static const char *const luaT_eventname[] = { /* ORDER TM */ + "__index", "__newindex", + "__gc", "__mode", "__eq", + "__add", "__sub", "__mul", "__div", "__mod", + "__pow", "__unm", "__len", "__lt", "__le", + "__concat", "__call" + }; + int i; + for (i=0; itmname[i] = luaS_new(L, luaT_eventname[i]); + luaS_fix(G(L)->tmname[i]); /* never collect these names */ + } +} + + +/* +** function to be used with macro "fasttm": optimized for absence of +** tag methods +*/ +const TValue *luaT_gettm (Table *events, TMS event, TString *ename) { + const TValue *tm = luaH_getstr(events, ename); + lua_assert(event <= TM_EQ); + if (ttisnil(tm)) { /* no tag method? */ + events->flags |= cast_byte(1u<metatable; + break; + case LUA_TUSERDATA: + mt = uvalue(o)->metatable; + break; + default: + mt = G(L)->mt[ttype(o)]; + } + return (mt ? 
luaH_getstr(mt, G(L)->tmname[event]) : luaO_nilobject); +} + diff --git a/deps/lua/src/ltm.h b/deps/lua/src/ltm.h new file mode 100644 index 0000000..64343b7 --- /dev/null +++ b/deps/lua/src/ltm.h @@ -0,0 +1,54 @@ +/* +** $Id: ltm.h,v 2.6.1.1 2007/12/27 13:02:25 roberto Exp $ +** Tag methods +** See Copyright Notice in lua.h +*/ + +#ifndef ltm_h +#define ltm_h + + +#include "lobject.h" + + +/* +* WARNING: if you change the order of this enumeration, +* grep "ORDER TM" +*/ +typedef enum { + TM_INDEX, + TM_NEWINDEX, + TM_GC, + TM_MODE, + TM_EQ, /* last tag method with `fast' access */ + TM_ADD, + TM_SUB, + TM_MUL, + TM_DIV, + TM_MOD, + TM_POW, + TM_UNM, + TM_LEN, + TM_LT, + TM_LE, + TM_CONCAT, + TM_CALL, + TM_N /* number of elements in the enum */ +} TMS; + + + +#define gfasttm(g,et,e) ((et) == NULL ? NULL : \ + ((et)->flags & (1u<<(e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e])) + +#define fasttm(l,et,e) gfasttm(G(l), et, e) + +LUAI_DATA const char *const luaT_typenames[]; + + +LUAI_FUNC const TValue *luaT_gettm (Table *events, TMS event, TString *ename); +LUAI_FUNC const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, + TMS event); +LUAI_FUNC void luaT_init (lua_State *L); + +#endif diff --git a/deps/lua/src/lua.c b/deps/lua/src/lua.c new file mode 100644 index 0000000..3a46609 --- /dev/null +++ b/deps/lua/src/lua.c @@ -0,0 +1,392 @@ +/* +** $Id: lua.c,v 1.160.1.2 2007/12/28 15:32:23 roberto Exp $ +** Lua stand-alone interpreter +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define lua_c + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +static lua_State *globalL = NULL; + +static const char *progname = LUA_PROGNAME; + + + +static void lstop (lua_State *L, lua_Debug *ar) { + (void)ar; /* unused arg. */ + lua_sethook(L, NULL, 0, 0); + luaL_error(L, "interrupted!"); +} + + +static void laction (int i) { + signal(i, SIG_DFL); /* if another SIGINT happens before lstop, + terminate process (default action) */ + lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); +} + + +static void print_usage (void) { + fprintf(stderr, + "usage: %s [options] [script [args]].\n" + "Available options are:\n" + " -e stat execute string " LUA_QL("stat") "\n" + " -l name require library " LUA_QL("name") "\n" + " -i enter interactive mode after executing " LUA_QL("script") "\n" + " -v show version information\n" + " -- stop handling options\n" + " - execute stdin and stop handling options\n" + , + progname); + fflush(stderr); +} + + +static void l_message (const char *pname, const char *msg) { + if (pname) fprintf(stderr, "%s: ", pname); + fprintf(stderr, "%s\n", msg); + fflush(stderr); +} + + +static int report (lua_State *L, int status) { + if (status && !lua_isnil(L, -1)) { + const char *msg = lua_tostring(L, -1); + if (msg == NULL) msg = "(error object is not a string)"; + l_message(progname, msg); + lua_pop(L, 1); + } + return status; +} + + +static int traceback (lua_State *L) { + if (!lua_isstring(L, 1)) /* 'message' not a string? 
*/ + return 1; /* keep it intact */ + lua_getfield(L, LUA_GLOBALSINDEX, "debug"); + if (!lua_istable(L, -1)) { + lua_pop(L, 1); + return 1; + } + lua_getfield(L, -1, "traceback"); + if (!lua_isfunction(L, -1)) { + lua_pop(L, 2); + return 1; + } + lua_pushvalue(L, 1); /* pass error message */ + lua_pushinteger(L, 2); /* skip this function and traceback */ + lua_call(L, 2, 1); /* call debug.traceback */ + return 1; +} + + +static int docall (lua_State *L, int narg, int clear) { + int status; + int base = lua_gettop(L) - narg; /* function index */ + lua_pushcfunction(L, traceback); /* push traceback function */ + lua_insert(L, base); /* put it under chunk and args */ + signal(SIGINT, laction); + status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base); + signal(SIGINT, SIG_DFL); + lua_remove(L, base); /* remove traceback function */ + /* force a complete garbage collection in case of errors */ + if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); + return status; +} + + +static void print_version (void) { + l_message(NULL, LUA_RELEASE " " LUA_COPYRIGHT); +} + + +static int getargs (lua_State *L, char **argv, int n) { + int narg; + int i; + int argc = 0; + while (argv[argc]) argc++; /* count total number of arguments */ + narg = argc - (n + 1); /* number of arguments to the script */ + luaL_checkstack(L, narg + 3, "too many arguments to script"); + for (i=n+1; i < argc; i++) + lua_pushstring(L, argv[i]); + lua_createtable(L, narg, n + 1); + for (i=0; i < argc; i++) { + lua_pushstring(L, argv[i]); + lua_rawseti(L, -2, i - n); + } + return narg; +} + + +static int dofile (lua_State *L, const char *name) { + int status = luaL_loadfile(L, name) || docall(L, 0, 1); + return report(L, status); +} + + +static int dostring (lua_State *L, const char *s, const char *name) { + int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); + return report(L, status); +} + + +static int dolibrary (lua_State *L, const char *name) { + lua_getglobal(L, "require"); + lua_pushstring(L, name); + return report(L, docall(L, 1, 1)); +} + + +static const char *get_prompt (lua_State *L, int firstline) { + const char *p; + lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2"); + p = lua_tostring(L, -1); + if (p == NULL) p = (firstline ? LUA_PROMPT : LUA_PROMPT2); + lua_pop(L, 1); /* remove global */ + return p; +} + + +static int incomplete (lua_State *L, int status) { + if (status == LUA_ERRSYNTAX) { + size_t lmsg; + const char *msg = lua_tolstring(L, -1, &lmsg); + const char *tp = msg + lmsg - (sizeof(LUA_QL("")) - 1); + if (strstr(msg, LUA_QL("")) == tp) { + lua_pop(L, 1); + return 1; + } + } + return 0; /* else... */ +} + + +static int pushline (lua_State *L, int firstline) { + char buffer[LUA_MAXINPUT]; + char *b = buffer; + size_t l; + const char *prmt = get_prompt(L, firstline); + if (lua_readline(L, b, prmt) == 0) + return 0; /* no input */ + l = strlen(b); + if (l > 0 && b[l-1] == '\n') /* line ends with newline? */ + b[l-1] = '\0'; /* remove it */ + if (firstline && b[0] == '=') /* first line starts with `=' ? */ + lua_pushfstring(L, "return %s", b+1); /* change it to `return' */ + else + lua_pushstring(L, b); + lua_freeline(L, b); + return 1; +} + + +static int loadline (lua_State *L) { + int status; + lua_settop(L, 0); + if (!pushline(L, 1)) + return -1; /* no input */ + for (;;) { /* repeat until gets a complete line */ + status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin"); + if (!incomplete(L, status)) break; /* cannot try to add lines? 
*/ + if (!pushline(L, 0)) /* no more input? */ + return -1; + lua_pushliteral(L, "\n"); /* add a new line... */ + lua_insert(L, -2); /* ...between the two lines */ + lua_concat(L, 3); /* join them */ + } + lua_saveline(L, 1); + lua_remove(L, 1); /* remove line */ + return status; +} + + +static void dotty (lua_State *L) { + int status; + const char *oldprogname = progname; + progname = NULL; + while ((status = loadline(L)) != -1) { + if (status == 0) status = docall(L, 0, 0); + report(L, status); + if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */ + lua_getglobal(L, "print"); + lua_insert(L, 1); + if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) + l_message(progname, lua_pushfstring(L, + "error calling " LUA_QL("print") " (%s)", + lua_tostring(L, -1))); + } + } + lua_settop(L, 0); /* clear stack */ + fputs("\n", stdout); + fflush(stdout); + progname = oldprogname; +} + + +static int handle_script (lua_State *L, char **argv, int n) { + int status; + const char *fname; + int narg = getargs(L, argv, n); /* collect arguments */ + lua_setglobal(L, "arg"); + fname = argv[n]; + if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0) + fname = NULL; /* stdin */ + status = luaL_loadfile(L, fname); + lua_insert(L, -(narg+1)); + if (status == 0) + status = docall(L, narg, 0); + else + lua_pop(L, narg); + return report(L, status); +} + + +/* check that argument has no extra characters at the end */ +#define notail(x) {if ((x)[2] != '\0') return -1;} + + +static int collectargs (char **argv, int *pi, int *pv, int *pe) { + int i; + for (i = 1; argv[i] != NULL; i++) { + if (argv[i][0] != '-') /* not an option? */ + return i; + switch (argv[i][1]) { /* option */ + case '-': + notail(argv[i]); + return (argv[i+1] != NULL ? i+1 : 0); + case '\0': + return i; + case 'i': + notail(argv[i]); + *pi = 1; /* go through */ + case 'v': + notail(argv[i]); + *pv = 1; + break; + case 'e': + *pe = 1; /* go through */ + case 'l': + if (argv[i][2] == '\0') { + i++; + if (argv[i] == NULL) return -1; + } + break; + default: return -1; /* invalid option */ + } + } + return 0; +} + + +static int runargs (lua_State *L, char **argv, int n) { + int i; + for (i = 1; i < n; i++) { + if (argv[i] == NULL) continue; + lua_assert(argv[i][0] == '-'); + switch (argv[i][1]) { /* option */ + case 'e': { + const char *chunk = argv[i] + 2; + if (*chunk == '\0') chunk = argv[++i]; + lua_assert(chunk != NULL); + if (dostring(L, chunk, "=(command line)") != 0) + return 1; + break; + } + case 'l': { + const char *filename = argv[i] + 2; + if (*filename == '\0') filename = argv[++i]; + lua_assert(filename != NULL); + if (dolibrary(L, filename)) + return 1; /* stop if file fails */ + break; + } + default: break; + } + } + return 0; +} + + +static int handle_luainit (lua_State *L) { + const char *init = getenv(LUA_INIT); + if (init == NULL) return 0; /* status OK */ + else if (init[0] == '@') + return dofile(L, init+1); + else + return dostring(L, init, "=" LUA_INIT); +} + + +struct Smain { + int argc; + char **argv; + int status; +}; + + +static int pmain (lua_State *L) { + struct Smain *s = (struct Smain *)lua_touserdata(L, 1); + char **argv = s->argv; + int script; + int has_i = 0, has_v = 0, has_e = 0; + globalL = L; + if (argv[0] && argv[0][0]) progname = argv[0]; + lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ + luaL_openlibs(L); /* open libraries */ + lua_gc(L, LUA_GCRESTART, 0); + s->status = handle_luainit(L); + if (s->status != 0) return 0; + script = collectargs(argv, &has_i, &has_v, &has_e); + 
if (script < 0) { /* invalid args? */ + print_usage(); + s->status = 1; + return 0; + } + if (has_v) print_version(); + s->status = runargs(L, argv, (script > 0) ? script : s->argc); + if (s->status != 0) return 0; + if (script) + s->status = handle_script(L, argv, script); + if (s->status != 0) return 0; + if (has_i) + dotty(L); + else if (script == 0 && !has_e && !has_v) { + if (lua_stdin_is_tty()) { + print_version(); + dotty(L); + } + else dofile(L, NULL); /* executes stdin as a file */ + } + return 0; +} + + +int main (int argc, char **argv) { + int status; + struct Smain s; + lua_State *L = lua_open(); /* create state */ + if (L == NULL) { + l_message(argv[0], "cannot create state: not enough memory"); + return EXIT_FAILURE; + } + s.argc = argc; + s.argv = argv; + status = lua_cpcall(L, &pmain, &s); + report(L, status); + lua_close(L); + return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS; +} + diff --git a/deps/lua/src/lua.h b/deps/lua/src/lua.h new file mode 100644 index 0000000..280ef23 --- /dev/null +++ b/deps/lua/src/lua.h @@ -0,0 +1,391 @@ +/* +** $Id: lua.h,v 1.218.1.7 2012/01/13 20:36:20 roberto Exp $ +** Lua - An Extensible Extension Language +** Lua.org, PUC-Rio, Brazil (http://www.lua.org) +** See Copyright Notice at the end of this file +*/ + + +#ifndef lua_h +#define lua_h + +#include +#include + + +#include "luaconf.h" + + +#define LUA_VERSION "Lua 5.1" +#define LUA_RELEASE "Lua 5.1.5" +#define LUA_VERSION_NUM 501 +#define LUA_COPYRIGHT "Copyright (C) 1994-2012 Lua.org, PUC-Rio" +#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes" + + +/* mark for precompiled code (`Lua') */ +#define LUA_SIGNATURE "\033Lua" + +/* option for multiple returns in `lua_pcall' and `lua_call' */ +#define LUA_MULTRET (-1) + + +/* +** pseudo-indices +*/ +#define LUA_REGISTRYINDEX (-10000) +#define LUA_ENVIRONINDEX (-10001) +#define LUA_GLOBALSINDEX (-10002) +#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) + + +/* thread status; 0 is OK */ +#define LUA_YIELD 1 +#define LUA_ERRRUN 2 +#define LUA_ERRSYNTAX 3 +#define LUA_ERRMEM 4 +#define LUA_ERRERR 5 + + +typedef struct lua_State lua_State; + +typedef int (*lua_CFunction) (lua_State *L); + + +/* +** functions that read/write blocks when loading/dumping Lua chunks +*/ +typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); + +typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); + + +/* +** prototype for memory-allocation functions +*/ +typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); + + +/* +** basic types +*/ +#define LUA_TNONE (-1) + +#define LUA_TNIL 0 +#define LUA_TBOOLEAN 1 +#define LUA_TLIGHTUSERDATA 2 +#define LUA_TNUMBER 3 +#define LUA_TSTRING 4 +#define LUA_TTABLE 5 +#define LUA_TFUNCTION 6 +#define LUA_TUSERDATA 7 +#define LUA_TTHREAD 8 + + + +/* minimum Lua stack available to a C function */ +#define LUA_MINSTACK 20 + + +/* +** generic extra include file +*/ +#if defined(LUA_USER_H) +#include LUA_USER_H +#endif + + +/* type of numbers in Lua */ +typedef LUA_NUMBER lua_Number; + + +/* type for integer functions */ +typedef LUA_INTEGER lua_Integer; + + + +/* +** state manipulation +*/ +LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); +LUA_API void (lua_close) (lua_State *L); +LUA_API lua_State *(lua_newthread) (lua_State *L); + +LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); + + +/* +** basic stack manipulation +*/ +LUA_API int (lua_gettop) (lua_State *L); +LUA_API void (lua_settop) (lua_State *L, int 
idx); +LUA_API void (lua_pushvalue) (lua_State *L, int idx); +LUA_API void (lua_remove) (lua_State *L, int idx); +LUA_API void (lua_insert) (lua_State *L, int idx); +LUA_API void (lua_replace) (lua_State *L, int idx); +LUA_API int (lua_checkstack) (lua_State *L, int sz); + +LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); + + +/* +** access functions (stack -> C) +*/ + +LUA_API int (lua_isnumber) (lua_State *L, int idx); +LUA_API int (lua_isstring) (lua_State *L, int idx); +LUA_API int (lua_iscfunction) (lua_State *L, int idx); +LUA_API int (lua_isuserdata) (lua_State *L, int idx); +LUA_API int (lua_type) (lua_State *L, int idx); +LUA_API const char *(lua_typename) (lua_State *L, int tp); + +LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); + +LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); +LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); +LUA_API int (lua_toboolean) (lua_State *L, int idx); +LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); +LUA_API size_t (lua_objlen) (lua_State *L, int idx); +LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); +LUA_API void *(lua_touserdata) (lua_State *L, int idx); +LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); +LUA_API const void *(lua_topointer) (lua_State *L, int idx); + + +/* +** push functions (C -> stack) +*/ +LUA_API void (lua_pushnil) (lua_State *L); +LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n); +LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); +LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); +LUA_API void (lua_pushstring) (lua_State *L, const char *s); +LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, + va_list argp); +LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); +LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); +LUA_API void (lua_pushboolean) (lua_State *L, int b); +LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); +LUA_API int (lua_pushthread) (lua_State *L); + + +/* +** get functions (Lua -> stack) +*/ +LUA_API void (lua_gettable) (lua_State *L, int idx); +LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawget) (lua_State *L, int idx); +LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); +LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); +LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); +LUA_API int (lua_getmetatable) (lua_State *L, int objindex); +LUA_API void (lua_getfenv) (lua_State *L, int idx); + + +/* +** set functions (stack -> Lua) +*/ +LUA_API void (lua_settable) (lua_State *L, int idx); +LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawset) (lua_State *L, int idx); +LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); +LUA_API int (lua_setmetatable) (lua_State *L, int objindex); +LUA_API int (lua_setfenv) (lua_State *L, int idx); + + +/* +** `load' and `call' functions (load and run Lua code) +*/ +LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); +LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); +LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); +LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname); + +LUA_API int (lua_dump) (lua_State *L, 
lua_Writer writer, void *data); + + +/* +** coroutine functions +*/ +LUA_API int (lua_yield) (lua_State *L, int nresults); +LUA_API int (lua_resume) (lua_State *L, int narg); +LUA_API int (lua_status) (lua_State *L); + +/* +** garbage-collection function and options +*/ + +#define LUA_GCSTOP 0 +#define LUA_GCRESTART 1 +#define LUA_GCCOLLECT 2 +#define LUA_GCCOUNT 3 +#define LUA_GCCOUNTB 4 +#define LUA_GCSTEP 5 +#define LUA_GCSETPAUSE 6 +#define LUA_GCSETSTEPMUL 7 + +LUA_API int (lua_gc) (lua_State *L, int what, int data); + + +/* +** miscellaneous functions +*/ + +LUA_API int (lua_error) (lua_State *L); + +LUA_API int (lua_next) (lua_State *L, int idx); + +LUA_API void (lua_concat) (lua_State *L, int n); + +LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define lua_pop(L,n) lua_settop(L, -(n)-1) + +#define lua_newtable(L) lua_createtable(L, 0, 0) + +#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) + +#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) + +#define lua_strlen(L,i) lua_objlen(L, (i)) + +#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) +#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) +#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) +#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) +#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) +#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) +#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) +#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) + +#define lua_pushliteral(L, s) \ + lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) + +#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) +#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) + +#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) + + + +/* +** compatibility macros and functions +*/ + +#define lua_open() luaL_newstate() + +#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) + +#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) + +#define lua_Chunkreader lua_Reader +#define lua_Chunkwriter lua_Writer + + +/* hack */ +LUA_API void lua_setlevel (lua_State *from, lua_State *to); + + +/* +** {====================================================================== +** Debug API +** ======================================================================= +*/ + + +/* +** Event codes +*/ +#define LUA_HOOKCALL 0 +#define LUA_HOOKRET 1 +#define LUA_HOOKLINE 2 +#define LUA_HOOKCOUNT 3 +#define LUA_HOOKTAILRET 4 + + +/* +** Event masks +*/ +#define LUA_MASKCALL (1 << LUA_HOOKCALL) +#define LUA_MASKRET (1 << LUA_HOOKRET) +#define LUA_MASKLINE (1 << LUA_HOOKLINE) +#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) + +typedef struct lua_Debug lua_Debug; /* activation record */ + + +/* Functions to be called by the debuger in specific events */ +typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); +LUA_API const char 
*lua_setupvalue (lua_State *L, int funcindex, int n); + +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); +LUA_API lua_Hook lua_gethook (lua_State *L); +LUA_API int lua_gethookmask (lua_State *L); +LUA_API int lua_gethookcount (lua_State *L); + + +struct lua_Debug { + int event; + const char *name; /* (n) */ + const char *namewhat; /* (n) `global', `local', `field', `method' */ + const char *what; /* (S) `Lua', `C', `main', `tail' */ + const char *source; /* (S) */ + int currentline; /* (l) */ + int nups; /* (u) number of upvalues */ + int linedefined; /* (S) */ + int lastlinedefined; /* (S) */ + char short_src[LUA_IDSIZE]; /* (S) */ + /* private part */ + int i_ci; /* active function */ +}; + +LUA_API void lua_enablereadonlytable (lua_State *L, int index, int enabled); +LUA_API int lua_isreadonlytable (lua_State *L, int index); + +/* }====================================================================== */ + + +/****************************************************************************** +* Copyright (C) 1994-2012 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + + +#endif diff --git a/deps/lua/src/lua_bit.c b/deps/lua/src/lua_bit.c new file mode 100644 index 0000000..9f83b85 --- /dev/null +++ b/deps/lua/src/lua_bit.c @@ -0,0 +1,190 @@ +/* +** Lua BitOp -- a bit operations library for Lua 5.1/5.2. +** http://bitop.luajit.org/ +** +** Copyright (C) 2008-2012 Mike Pall. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining +** a copy of this software and associated documentation files (the +** "Software"), to deal in the Software without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to +** permit persons to whom the Software is furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. +** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +** +** [ MIT license: http://www.opensource.org/licenses/mit-license.php ] +*/ + +#define LUA_BITOP_VERSION "1.0.2" + +#define LUA_LIB +#include "lua.h" +#include "lauxlib.h" + +#ifdef _MSC_VER +/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */ +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +typedef int32_t SBits; +typedef uint32_t UBits; + +typedef union { + lua_Number n; +#ifdef LUA_NUMBER_DOUBLE + uint64_t b; +#else + UBits b; +#endif +} BitNum; + +/* Convert argument to bit type. */ +static UBits barg(lua_State *L, int idx) +{ + BitNum bn; + UBits b; +#if LUA_VERSION_NUM < 502 + bn.n = lua_tonumber(L, idx); +#else + bn.n = luaL_checknumber(L, idx); +#endif +#if defined(LUA_NUMBER_DOUBLE) + bn.n += 6755399441055744.0; /* 2^52+2^51 */ +#ifdef SWAPPED_DOUBLE + b = (UBits)(bn.b >> 32); +#else + b = (UBits)bn.b; +#endif +#elif defined(LUA_NUMBER_INT) || defined(LUA_NUMBER_LONG) || \ + defined(LUA_NUMBER_LONGLONG) || defined(LUA_NUMBER_LONG_LONG) || \ + defined(LUA_NUMBER_LLONG) + if (sizeof(UBits) == sizeof(lua_Number)) + b = bn.b; + else + b = (UBits)(SBits)bn.n; +#elif defined(LUA_NUMBER_FLOAT) +#error "A 'float' lua_Number type is incompatible with this library" +#else +#error "Unknown number type, check LUA_NUMBER_* in luaconf.h" +#endif +#if LUA_VERSION_NUM < 502 + if (b == 0 && !lua_isnumber(L, idx)) { + luaL_typerror(L, idx, "number"); + } +#endif + return b; +} + +/* Return bit type. */ +#define BRET(b) lua_pushnumber(L, (lua_Number)(SBits)(b)); return 1; + +static int bit_tobit(lua_State *L) { BRET(barg(L, 1)) } +static int bit_bnot(lua_State *L) { BRET(~barg(L, 1)) } + +#define BIT_OP(func, opr) \ + static int func(lua_State *L) { int i; UBits b = barg(L, 1); \ + for (i = lua_gettop(L); i > 1; i--) b opr barg(L, i); \ + BRET(b) } +BIT_OP(bit_band, &=) +BIT_OP(bit_bor, |=) +BIT_OP(bit_bxor, ^=) + +#define bshl(b, n) (b << n) +#define bshr(b, n) (b >> n) +#define bsar(b, n) ((SBits)b >> n) +#define brol(b, n) ((b << n) | (b >> (32-n))) +#define bror(b, n) ((b << (32-n)) | (b >> n)) +#define BIT_SH(func, fn) \ + static int func(lua_State *L) { \ + UBits b = barg(L, 1); UBits n = barg(L, 2) & 31; BRET(fn(b, n)) } +BIT_SH(bit_lshift, bshl) +BIT_SH(bit_rshift, bshr) +BIT_SH(bit_arshift, bsar) +BIT_SH(bit_rol, brol) +BIT_SH(bit_ror, bror) + +static int bit_bswap(lua_State *L) +{ + UBits b = barg(L, 1); + b = (b >> 24) | ((b >> 8) & 0xff00) | ((b & 0xff00) << 8) | (b << 24); + BRET(b) +} + +static int bit_tohex(lua_State *L) +{ + UBits b = barg(L, 1); + SBits n = lua_isnone(L, 2) ? 
8 : (SBits)barg(L, 2); + const char *hexdigits = "0123456789abcdef"; + char buf[8]; + int i; + if (n < 0) { n = -n; hexdigits = "0123456789ABCDEF"; } + if (n > 8) n = 8; + for (i = (int)n; --i >= 0; ) { buf[i] = hexdigits[b & 15]; b >>= 4; } + lua_pushlstring(L, buf, (size_t)n); + return 1; +} + +static const struct luaL_Reg bit_funcs[] = { + { "tobit", bit_tobit }, + { "bnot", bit_bnot }, + { "band", bit_band }, + { "bor", bit_bor }, + { "bxor", bit_bxor }, + { "lshift", bit_lshift }, + { "rshift", bit_rshift }, + { "arshift", bit_arshift }, + { "rol", bit_rol }, + { "ror", bit_ror }, + { "bswap", bit_bswap }, + { "tohex", bit_tohex }, + { NULL, NULL } +}; + +/* Signed right-shifts are implementation-defined per C89/C99. +** But the de facto standard are arithmetic right-shifts on two's +** complement CPUs. This behaviour is required here, so test for it. +*/ +#define BAD_SAR (bsar(-8, 2) != (SBits)-2) + +LUALIB_API int luaopen_bit(lua_State *L) +{ + UBits b; + lua_pushnumber(L, (lua_Number)1437217655L); + b = barg(L, -1); + if (b != (UBits)1437217655L || BAD_SAR) { /* Perform a simple self-test. */ + const char *msg = "compiled with incompatible luaconf.h"; +#ifdef LUA_NUMBER_DOUBLE +#ifdef _WIN32 + if (b == (UBits)1610612736L) + msg = "use D3DCREATE_FPU_PRESERVE with DirectX"; +#endif + if (b == (UBits)1127743488L) + msg = "not compiled with SWAPPED_DOUBLE"; +#endif + if (BAD_SAR) + msg = "arithmetic right-shift broken"; + luaL_error(L, "bit library self-test failed (%s)", msg); + } +#if LUA_VERSION_NUM < 502 + luaL_register(L, "bit", bit_funcs); +#else + luaL_newlib(L, bit_funcs); +#endif + return 1; +} + diff --git a/deps/lua/src/lua_cjson.c b/deps/lua/src/lua_cjson.c new file mode 100644 index 0000000..991f5d3 --- /dev/null +++ b/deps/lua/src/lua_cjson.c @@ -0,0 +1,1430 @@ +/* Lua CJSON - JSON support for Lua + * + * Copyright (c) 2010-2012 Mark Pulford + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* Caveats: + * - JSON "null" values are represented as lightuserdata since Lua + * tables cannot contain "nil". Compare with cjson.null. + * - Invalid UTF-8 characters are not detected and will be passed + * untouched. If required, UTF-8 error checking should be done + * outside this library. + * - Javascript comments are not part of the JSON spec, and are not + * currently supported. + * + * Note: Decoding is slower than encoding. 
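The barg() conversion above relies on a bias trick: adding 2^52+2^51 to a double leaves the argument, reduced modulo 2^32, in the low 32 bits of the IEEE-754 mantissa. A standalone sketch of just that step, assuming 64-bit IEEE-754 doubles (the function name is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the bias trick used by barg(); assumes IEEE-754 64-bit doubles. */
static uint32_t number_to_bits(double n) {
    union { double d; uint64_t b; } u;
    u.d = n + 6755399441055744.0;   /* 2^52 + 2^51 */
    return (uint32_t)u.b;           /* low 32 mantissa bits = n mod 2^32 */
}

int main(void) {
    printf("%u\n", (unsigned)number_to_bits(1.0));            /* 1 */
    printf("%u\n", (unsigned)number_to_bits(4294967296.0));   /* 2^32 wraps to 0 */
    printf("%d\n", (int)(int32_t)number_to_bits(-1.0));       /* -1 (0xffffffff) */
    return 0;
}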
Lua spends significant + * time (30%) managing tables when parsing JSON since it is + * difficult to know object/array sizes ahead of time. + */ + +#include +#include +#include +#include +#include +#include "lua.h" +#include "lauxlib.h" + +#include "strbuf.h" +#include "fpconv.h" + +#include "../../../src/solarisfixes.h" + +#ifndef CJSON_MODNAME +#define CJSON_MODNAME "cjson" +#endif + +#ifndef CJSON_VERSION +#define CJSON_VERSION "2.1.0" +#endif + +/* Workaround for Solaris platforms missing isinf() */ +#if !defined(isinf) && (defined(USE_INTERNAL_ISINF) || defined(MISSING_ISINF)) +#define isinf(x) (!isnan(x) && isnan((x) - (x))) +#endif + +#define DEFAULT_SPARSE_CONVERT 0 +#define DEFAULT_SPARSE_RATIO 2 +#define DEFAULT_SPARSE_SAFE 10 +#define DEFAULT_ENCODE_MAX_DEPTH 1000 +#define DEFAULT_DECODE_MAX_DEPTH 1000 +#define DEFAULT_ENCODE_INVALID_NUMBERS 0 +#define DEFAULT_DECODE_INVALID_NUMBERS 1 +#define DEFAULT_ENCODE_KEEP_BUFFER 1 +#define DEFAULT_ENCODE_NUMBER_PRECISION 14 + +#ifdef DISABLE_INVALID_NUMBERS +#undef DEFAULT_DECODE_INVALID_NUMBERS +#define DEFAULT_DECODE_INVALID_NUMBERS 0 +#endif + +typedef enum { + T_OBJ_BEGIN, + T_OBJ_END, + T_ARR_BEGIN, + T_ARR_END, + T_STRING, + T_NUMBER, + T_BOOLEAN, + T_NULL, + T_COLON, + T_COMMA, + T_END, + T_WHITESPACE, + T_ERROR, + T_UNKNOWN +} json_token_type_t; + +static const char *json_token_type_name[] = { + "T_OBJ_BEGIN", + "T_OBJ_END", + "T_ARR_BEGIN", + "T_ARR_END", + "T_STRING", + "T_NUMBER", + "T_BOOLEAN", + "T_NULL", + "T_COLON", + "T_COMMA", + "T_END", + "T_WHITESPACE", + "T_ERROR", + "T_UNKNOWN", + NULL +}; + +typedef struct { + json_token_type_t ch2token[256]; + char escape2char[256]; /* Decoding */ + + /* encode_buf is only allocated and used when + * encode_keep_buffer is set */ + strbuf_t encode_buf; + + int encode_sparse_convert; + int encode_sparse_ratio; + int encode_sparse_safe; + int encode_max_depth; + int encode_invalid_numbers; /* 2 => Encode as "null" */ + int encode_number_precision; + int encode_keep_buffer; + + int decode_invalid_numbers; + int decode_max_depth; +} json_config_t; + +typedef struct { + const char *data; + const char *ptr; + strbuf_t *tmp; /* Temporary storage for strings */ + json_config_t *cfg; + int current_depth; +} json_parse_t; + +typedef struct { + json_token_type_t type; + size_t index; + union { + const char *string; + double number; + int boolean; + } value; + size_t string_len; +} json_token_t; + +static const char *char2escape[256] = { + "\\u0000", "\\u0001", "\\u0002", "\\u0003", + "\\u0004", "\\u0005", "\\u0006", "\\u0007", + "\\b", "\\t", "\\n", "\\u000b", + "\\f", "\\r", "\\u000e", "\\u000f", + "\\u0010", "\\u0011", "\\u0012", "\\u0013", + "\\u0014", "\\u0015", "\\u0016", "\\u0017", + "\\u0018", "\\u0019", "\\u001a", "\\u001b", + "\\u001c", "\\u001d", "\\u001e", "\\u001f", + NULL, NULL, "\\\"", NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\/", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, "\\\\", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\u007f", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, +}; + +/* ===== CONFIGURATION ===== */ + +static json_config_t *json_fetch_config(lua_State *l) +{ + json_config_t *cfg; + + cfg = lua_touserdata(l, lua_upvalueindex(1)); + if (!cfg) + luaL_error(l, "BUG: Unable to fetch CJSON configuration"); + + return cfg; +} + +/* Ensure the correct number of arguments have been provided. + * Pad with nil to allow other functions to simply check arg[i] + * to find whether an argument was provided */ +static json_config_t *json_arg_init(lua_State *l, int args) +{ + luaL_argcheck(l, lua_gettop(l) <= args, args + 1, + "found too many arguments"); + + while (lua_gettop(l) < args) + lua_pushnil(l); + + return json_fetch_config(l); +} + +/* Process integer options for configuration functions */ +static int json_integer_option(lua_State *l, int optindex, int *setting, + int min, int max) +{ + char errmsg[64]; + int value; + + if (!lua_isnil(l, optindex)) { + value = luaL_checkinteger(l, optindex); + snprintf(errmsg, sizeof(errmsg), "expected integer between %d and %d", min, max); + luaL_argcheck(l, min <= value && value <= max, 1, errmsg); + *setting = value; + } + + lua_pushinteger(l, *setting); + + return 1; +} + +/* Process enumerated arguments for a configuration function */ +static int json_enum_option(lua_State *l, int optindex, int *setting, + const char **options, int bool_true) +{ + static const char *bool_options[] = { "off", "on", NULL }; + + if (!options) { + options = bool_options; + bool_true = 1; + } + + if (!lua_isnil(l, optindex)) { + if (bool_true && lua_isboolean(l, optindex)) + *setting = lua_toboolean(l, optindex) * bool_true; + else + *setting = luaL_checkoption(l, optindex, NULL, options); + } + + if (bool_true && (*setting == 0 || *setting == bool_true)) + lua_pushboolean(l, *setting); + else + lua_pushstring(l, options[*setting]); + + return 1; +} + +/* Configures handling of extremely sparse arrays: + * convert: Convert extremely sparse arrays into objects? Otherwise error. 
+ * ratio: 0: always allow sparse; 1: never allow sparse; >1: use ratio + * safe: Always use an array when the max index <= safe */ +static int json_cfg_encode_sparse_array(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 3); + + json_enum_option(l, 1, &cfg->encode_sparse_convert, NULL, 1); + json_integer_option(l, 2, &cfg->encode_sparse_ratio, 0, INT_MAX); + json_integer_option(l, 3, &cfg->encode_sparse_safe, 0, INT_MAX); + + return 3; +} + +/* Configures the maximum number of nested arrays/objects allowed when + * encoding */ +static int json_cfg_encode_max_depth(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 1); + + return json_integer_option(l, 1, &cfg->encode_max_depth, 1, INT_MAX); +} + +/* Configures the maximum number of nested arrays/objects allowed when + * encoding */ +static int json_cfg_decode_max_depth(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 1); + + return json_integer_option(l, 1, &cfg->decode_max_depth, 1, INT_MAX); +} + +/* Configures number precision when converting doubles to text */ +static int json_cfg_encode_number_precision(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 1); + + return json_integer_option(l, 1, &cfg->encode_number_precision, 1, 14); +} + +/* Configures JSON encoding buffer persistence */ +static int json_cfg_encode_keep_buffer(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 1); + int old_value; + + old_value = cfg->encode_keep_buffer; + + json_enum_option(l, 1, &cfg->encode_keep_buffer, NULL, 1); + + /* Init / free the buffer if the setting has changed */ + if (old_value ^ cfg->encode_keep_buffer) { + if (cfg->encode_keep_buffer) + strbuf_init(&cfg->encode_buf, 0); + else + strbuf_free(&cfg->encode_buf); + } + + return 1; +} + +#if defined(DISABLE_INVALID_NUMBERS) && !defined(USE_INTERNAL_FPCONV) +void json_verify_invalid_number_setting(lua_State *l, int *setting) +{ + if (*setting == 1) { + *setting = 0; + luaL_error(l, "Infinity, NaN, and/or hexadecimal numbers are not supported."); + } +} +#else +#define json_verify_invalid_number_setting(l, s) do { } while(0) +#endif + +static int json_cfg_encode_invalid_numbers(lua_State *l) +{ + static const char *options[] = { "off", "on", "null", NULL }; + json_config_t *cfg = json_arg_init(l, 1); + + json_enum_option(l, 1, &cfg->encode_invalid_numbers, options, 1); + + json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); + + return 1; +} + +static int json_cfg_decode_invalid_numbers(lua_State *l) +{ + json_config_t *cfg = json_arg_init(l, 1); + + json_enum_option(l, 1, &cfg->decode_invalid_numbers, NULL, 1); + + json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); + + return 1; +} + +static int json_destroy_config(lua_State *l) +{ + json_config_t *cfg; + + cfg = lua_touserdata(l, 1); + if (cfg) + strbuf_free(&cfg->encode_buf); + cfg = NULL; + + return 0; +} + +static void json_create_config(lua_State *l) +{ + json_config_t *cfg; + int i; + + cfg = lua_newuserdata(l, sizeof(*cfg)); + + /* Create GC method to clean up strbuf */ + lua_newtable(l); + lua_pushcfunction(l, json_destroy_config); + lua_setfield(l, -2, "__gc"); + lua_setmetatable(l, -2); + + cfg->encode_sparse_convert = DEFAULT_SPARSE_CONVERT; + cfg->encode_sparse_ratio = DEFAULT_SPARSE_RATIO; + cfg->encode_sparse_safe = DEFAULT_SPARSE_SAFE; + cfg->encode_max_depth = DEFAULT_ENCODE_MAX_DEPTH; + cfg->decode_max_depth = DEFAULT_DECODE_MAX_DEPTH; + cfg->encode_invalid_numbers = DEFAULT_ENCODE_INVALID_NUMBERS; + cfg->decode_invalid_numbers = 
DEFAULT_DECODE_INVALID_NUMBERS; + cfg->encode_keep_buffer = DEFAULT_ENCODE_KEEP_BUFFER; + cfg->encode_number_precision = DEFAULT_ENCODE_NUMBER_PRECISION; + +#if DEFAULT_ENCODE_KEEP_BUFFER > 0 + strbuf_init(&cfg->encode_buf, 0); +#endif + + /* Decoding init */ + + /* Tag all characters as an error */ + for (i = 0; i < 256; i++) + cfg->ch2token[i] = T_ERROR; + + /* Set tokens that require no further processing */ + cfg->ch2token['{'] = T_OBJ_BEGIN; + cfg->ch2token['}'] = T_OBJ_END; + cfg->ch2token['['] = T_ARR_BEGIN; + cfg->ch2token[']'] = T_ARR_END; + cfg->ch2token[','] = T_COMMA; + cfg->ch2token[':'] = T_COLON; + cfg->ch2token['\0'] = T_END; + cfg->ch2token[' '] = T_WHITESPACE; + cfg->ch2token['\t'] = T_WHITESPACE; + cfg->ch2token['\n'] = T_WHITESPACE; + cfg->ch2token['\r'] = T_WHITESPACE; + + /* Update characters that require further processing */ + cfg->ch2token['f'] = T_UNKNOWN; /* false? */ + cfg->ch2token['i'] = T_UNKNOWN; /* inf, ininity? */ + cfg->ch2token['I'] = T_UNKNOWN; + cfg->ch2token['n'] = T_UNKNOWN; /* null, nan? */ + cfg->ch2token['N'] = T_UNKNOWN; + cfg->ch2token['t'] = T_UNKNOWN; /* true? */ + cfg->ch2token['"'] = T_UNKNOWN; /* string? */ + cfg->ch2token['+'] = T_UNKNOWN; /* number? */ + cfg->ch2token['-'] = T_UNKNOWN; + for (i = 0; i < 10; i++) + cfg->ch2token['0' + i] = T_UNKNOWN; + + /* Lookup table for parsing escape characters */ + for (i = 0; i < 256; i++) + cfg->escape2char[i] = 0; /* String error */ + cfg->escape2char['"'] = '"'; + cfg->escape2char['\\'] = '\\'; + cfg->escape2char['/'] = '/'; + cfg->escape2char['b'] = '\b'; + cfg->escape2char['t'] = '\t'; + cfg->escape2char['n'] = '\n'; + cfg->escape2char['f'] = '\f'; + cfg->escape2char['r'] = '\r'; + cfg->escape2char['u'] = 'u'; /* Unicode parsing required */ +} + +/* ===== ENCODING ===== */ + +static void json_encode_exception(lua_State *l, json_config_t *cfg, strbuf_t *json, int lindex, + const char *reason) +{ + if (!cfg->encode_keep_buffer) + strbuf_free(json); + luaL_error(l, "Cannot serialise %s: %s", + lua_typename(l, lua_type(l, lindex)), reason); +} + +/* json_append_string args: + * - lua_State + * - JSON strbuf + * - String (Lua stack index) + * + * Returns nothing. Doesn't remove string from Lua stack */ +static void json_append_string(lua_State *l, strbuf_t *json, int lindex) +{ + const char *escstr; + int i; + const char *str; + size_t len; + + str = lua_tolstring(l, lindex, &len); + + /* Worst case is len * 6 (all unicode escapes). + * This buffer is reused constantly for small strings + * If there are any excess pages, they won't be hit anyway. + * This gains ~5% speedup. */ + if (len > SIZE_MAX / 6 - 3) + abort(); /* Overflow check */ + strbuf_ensure_empty_length(json, len * 6 + 2); + + strbuf_append_char_unsafe(json, '\"'); + for (i = 0; i < len; i++) { + escstr = char2escape[(unsigned char)str[i]]; + if (escstr) + strbuf_append_string(json, escstr); + else + strbuf_append_char_unsafe(json, str[i]); + } + strbuf_append_char_unsafe(json, '\"'); +} + +/* Find the size of the array on the top of the Lua stack + * -1 object (not a pure array) + * >=0 elements in array + */ +static int lua_array_length(lua_State *l, json_config_t *cfg, strbuf_t *json) +{ + double k; + int max; + int items; + + max = 0; + items = 0; + + lua_pushnil(l); + /* table, startkey */ + while (lua_next(l, -2) != 0) { + /* table, key, value */ + if (lua_type(l, -2) == LUA_TNUMBER && + (k = lua_tonumber(l, -2))) { + /* Integer >= 1 ? 
*/ + if (floor(k) == k && k >= 1) { + if (k > max) + max = k; + items++; + lua_pop(l, 1); + continue; + } + } + + /* Must not be an array (non integer key) */ + lua_pop(l, 2); + return -1; + } + + /* Encode excessively sparse arrays as objects (if enabled) */ + if (cfg->encode_sparse_ratio > 0 && + max > items * cfg->encode_sparse_ratio && + max > cfg->encode_sparse_safe) { + if (!cfg->encode_sparse_convert) + json_encode_exception(l, cfg, json, -1, "excessively sparse array"); + + return -1; + } + + return max; +} + +static void json_check_encode_depth(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json) +{ + /* Ensure there are enough slots free to traverse a table (key, + * value) and push a string for a potential error message. + * + * Unlike "decode", the key and value are still on the stack when + * lua_checkstack() is called. Hence an extra slot for luaL_error() + * below is required just in case the next check to lua_checkstack() + * fails. + * + * While this won't cause a crash due to the EXTRA_STACK reserve + * slots, it would still be an improper use of the API. */ + if (current_depth <= cfg->encode_max_depth && lua_checkstack(l, 3)) + return; + + if (!cfg->encode_keep_buffer) + strbuf_free(json); + + luaL_error(l, "Cannot serialise, excessive nesting (%d)", + current_depth); +} + +static void json_append_data(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json); + +/* json_append_array args: + * - lua_State + * - JSON strbuf + * - Size of passwd Lua array (top of stack) */ +static void json_append_array(lua_State *l, json_config_t *cfg, int current_depth, + strbuf_t *json, int array_length) +{ + int comma, i; + + strbuf_append_char(json, '['); + + comma = 0; + for (i = 1; i <= array_length; i++) { + if (comma) + strbuf_append_char(json, ','); + else + comma = 1; + + lua_rawgeti(l, -1, i); + json_append_data(l, cfg, current_depth, json); + lua_pop(l, 1); + } + + strbuf_append_char(json, ']'); +} + +static void json_append_number(lua_State *l, json_config_t *cfg, + strbuf_t *json, int lindex) +{ + double num = lua_tonumber(l, lindex); + int len; + + if (cfg->encode_invalid_numbers == 0) { + /* Prevent encoding invalid numbers */ + if (isinf(num) || isnan(num)) + json_encode_exception(l, cfg, json, lindex, "must not be NaN or Inf"); + } else if (cfg->encode_invalid_numbers == 1) { + /* Encode invalid numbers, but handle "nan" separately + * since some platforms may encode as "-nan". 
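The sparse-array check in lua_array_length() above boils down to one comparison against the configured ratio and safe limit. A small restatement with the default settings (ratio 2, safe 10), for illustration only; the helper name is not part of the patch:

/* Mirrors the check in lua_array_length(); defaults: ratio=2, safe=10. */
static int excessively_sparse(int max, int items, int ratio, int safe) {
    return ratio > 0 && max > items * ratio && max > safe;
}
/* Examples with the defaults:
 *   keys {1, 5}   -> max=5,   items=2 -> 0: still encoded as an array
 *   keys {1, 100} -> max=100, items=2 -> 1: error, or an object when
 *                    cjson.encode_sparse_array(true) enables conversion */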
*/ + if (isnan(num)) { + strbuf_append_mem(json, "nan", 3); + return; + } + } else { + /* Encode invalid numbers as "null" */ + if (isinf(num) || isnan(num)) { + strbuf_append_mem(json, "null", 4); + return; + } + } + + strbuf_ensure_empty_length(json, FPCONV_G_FMT_BUFSIZE); + len = fpconv_g_fmt(strbuf_empty_ptr(json), num, cfg->encode_number_precision); + strbuf_extend_length(json, len); +} + +static void json_append_object(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json) +{ + int comma, keytype; + + /* Object */ + strbuf_append_char(json, '{'); + + lua_pushnil(l); + /* table, startkey */ + comma = 0; + while (lua_next(l, -2) != 0) { + if (comma) + strbuf_append_char(json, ','); + else + comma = 1; + + /* table, key, value */ + keytype = lua_type(l, -2); + if (keytype == LUA_TNUMBER) { + strbuf_append_char(json, '"'); + json_append_number(l, cfg, json, -2); + strbuf_append_mem(json, "\":", 2); + } else if (keytype == LUA_TSTRING) { + json_append_string(l, json, -2); + strbuf_append_char(json, ':'); + } else { + json_encode_exception(l, cfg, json, -2, + "table key must be a number or string"); + /* never returns */ + } + + /* table, key, value */ + json_append_data(l, cfg, current_depth, json); + lua_pop(l, 1); + /* table, key */ + } + + strbuf_append_char(json, '}'); +} + +/* Serialise Lua data into JSON string. */ +static void json_append_data(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json) +{ + int len; + + switch (lua_type(l, -1)) { + case LUA_TSTRING: + json_append_string(l, json, -1); + break; + case LUA_TNUMBER: + json_append_number(l, cfg, json, -1); + break; + case LUA_TBOOLEAN: + if (lua_toboolean(l, -1)) + strbuf_append_mem(json, "true", 4); + else + strbuf_append_mem(json, "false", 5); + break; + case LUA_TTABLE: + current_depth++; + json_check_encode_depth(l, cfg, current_depth, json); + len = lua_array_length(l, cfg, json); + if (len > 0) + json_append_array(l, cfg, current_depth, json, len); + else + json_append_object(l, cfg, current_depth, json); + break; + case LUA_TNIL: + strbuf_append_mem(json, "null", 4); + break; + case LUA_TLIGHTUSERDATA: + if (lua_touserdata(l, -1) == NULL) { + strbuf_append_mem(json, "null", 4); + break; + } + default: + /* Remaining types (LUA_TFUNCTION, LUA_TUSERDATA, LUA_TTHREAD, + * and LUA_TLIGHTUSERDATA) cannot be serialised */ + json_encode_exception(l, cfg, json, -1, "type not supported"); + /* never returns */ + } +} + +static int json_encode(lua_State *l) +{ + json_config_t *cfg = json_fetch_config(l); + strbuf_t local_encode_buf; + strbuf_t *encode_buf; + char *json; + size_t len; + + luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); + + if (!cfg->encode_keep_buffer) { + /* Use private buffer */ + encode_buf = &local_encode_buf; + strbuf_init(encode_buf, 0); + } else { + /* Reuse existing buffer */ + encode_buf = &cfg->encode_buf; + strbuf_reset(encode_buf); + } + + json_append_data(l, cfg, 0, encode_buf); + json = strbuf_string(encode_buf, &len); + + lua_pushlstring(l, json, len); + + if (!cfg->encode_keep_buffer) + strbuf_free(encode_buf); + + return 1; +} + +/* ===== DECODING ===== */ + +static void json_process_value(lua_State *l, json_parse_t *json, + json_token_t *token); + +static int hexdigit2int(char hex) +{ + if ('0' <= hex && hex <= '9') + return hex - '0'; + + /* Force lowercase */ + hex |= 0x20; + if ('a' <= hex && hex <= 'f') + return 10 + hex - 'a'; + + return -1; +} + +static int decode_hex4(const char *hex) +{ + int digit[4]; + int i; + + /* Convert ASCII hex 
digit to numeric digit + * Note: this returns an error for invalid hex digits, including + * NULL */ + for (i = 0; i < 4; i++) { + digit[i] = hexdigit2int(hex[i]); + if (digit[i] < 0) { + return -1; + } + } + + return (digit[0] << 12) + + (digit[1] << 8) + + (digit[2] << 4) + + digit[3]; +} + +/* Converts a Unicode codepoint to UTF-8. + * Returns UTF-8 string length, and up to 4 bytes in *utf8 */ +static int codepoint_to_utf8(char *utf8, int codepoint) +{ + /* 0xxxxxxx */ + if (codepoint <= 0x7F) { + utf8[0] = codepoint; + return 1; + } + + /* 110xxxxx 10xxxxxx */ + if (codepoint <= 0x7FF) { + utf8[0] = (codepoint >> 6) | 0xC0; + utf8[1] = (codepoint & 0x3F) | 0x80; + return 2; + } + + /* 1110xxxx 10xxxxxx 10xxxxxx */ + if (codepoint <= 0xFFFF) { + utf8[0] = (codepoint >> 12) | 0xE0; + utf8[1] = ((codepoint >> 6) & 0x3F) | 0x80; + utf8[2] = (codepoint & 0x3F) | 0x80; + return 3; + } + + /* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint <= 0x1FFFFF) { + utf8[0] = (codepoint >> 18) | 0xF0; + utf8[1] = ((codepoint >> 12) & 0x3F) | 0x80; + utf8[2] = ((codepoint >> 6) & 0x3F) | 0x80; + utf8[3] = (codepoint & 0x3F) | 0x80; + return 4; + } + + return 0; +} + + +/* Called when index pointing to beginning of UTF-16 code escape: \uXXXX + * \u is guaranteed to exist, but the remaining hex characters may be + * missing. + * Translate to UTF-8 and append to temporary token string. + * Must advance index to the next character to be processed. + * Returns: 0 success + * -1 error + */ +static int json_append_unicode_escape(json_parse_t *json) +{ + char utf8[4]; /* Surrogate pairs require 4 UTF-8 bytes */ + int codepoint; + int surrogate_low; + int len; + int escape_len = 6; + + /* Fetch UTF-16 code unit */ + codepoint = decode_hex4(json->ptr + 2); + if (codepoint < 0) + return -1; + + /* UTF-16 surrogate pairs take the following 2 byte form: + * 11011 x yyyyyyyyyy + * When x = 0: y is the high 10 bits of the codepoint + * x = 1: y is the low 10 bits of the codepoint + * + * Check for a surrogate pair (high or low) */ + if ((codepoint & 0xF800) == 0xD800) { + /* Error if the 1st surrogate is not high */ + if (codepoint & 0x400) + return -1; + + /* Ensure the next code is a unicode escape */ + if (*(json->ptr + escape_len) != '\\' || + *(json->ptr + escape_len + 1) != 'u') { + return -1; + } + + /* Fetch the next codepoint */ + surrogate_low = decode_hex4(json->ptr + 2 + escape_len); + if (surrogate_low < 0) + return -1; + + /* Error if the 2nd code is not a low surrogate */ + if ((surrogate_low & 0xFC00) != 0xDC00) + return -1; + + /* Calculate Unicode codepoint */ + codepoint = (codepoint & 0x3FF) << 10; + surrogate_low &= 0x3FF; + codepoint = (codepoint | surrogate_low) + 0x10000; + escape_len = 12; + } + + /* Convert codepoint to UTF-8 */ + len = codepoint_to_utf8(utf8, codepoint); + if (!len) + return -1; + + /* Append bytes and advance parse index */ + strbuf_append_mem_unsafe(json->tmp, utf8, len); + json->ptr += escape_len; + + return 0; +} + +static void json_set_token_error(json_token_t *token, json_parse_t *json, + const char *errtype) +{ + token->type = T_ERROR; + token->index = json->ptr - json->data; + token->value.string = errtype; +} + +static void json_next_string_token(json_parse_t *json, json_token_t *token) +{ + char *escape2char = json->cfg->escape2char; + char ch; + + /* Caller must ensure a string is next */ + assert(*json->ptr == '"'); + + /* Skip " */ + json->ptr++; + + /* json->tmp is the temporary strbuf used to accumulate the + * decoded string value. 
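A worked example of the surrogate-pair arithmetic in json_append_unicode_escape() and codepoint_to_utf8() above: the escape pair \uD83D\uDE00 combines into U+1F600 and becomes four UTF-8 bytes. Standalone sketch, for illustration only:

#include <stdio.h>

int main(void) {
    int hi = 0xD83D, lo = 0xDE00;   /* high and low surrogate code units */
    int cp = (((hi & 0x3FF) << 10) | (lo & 0x3FF)) + 0x10000;
    unsigned char utf8[4];
    utf8[0] = (cp >> 18) | 0xF0;
    utf8[1] = ((cp >> 12) & 0x3F) | 0x80;
    utf8[2] = ((cp >> 6) & 0x3F) | 0x80;
    utf8[3] = (cp & 0x3F) | 0x80;
    printf("U+%X -> %02X %02X %02X %02X\n",
           cp, utf8[0], utf8[1], utf8[2], utf8[3]);   /* U+1F600 -> F0 9F 98 80 */
    return 0;
}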
+ * json->tmp is sized to handle JSON containing only a string value. + */ + strbuf_reset(json->tmp); + + while ((ch = *json->ptr) != '"') { + if (!ch) { + /* Premature end of the string */ + json_set_token_error(token, json, "unexpected end of string"); + return; + } + + /* Handle escapes */ + if (ch == '\\') { + /* Fetch escape character */ + ch = *(json->ptr + 1); + + /* Translate escape code and append to tmp string */ + ch = escape2char[(unsigned char)ch]; + if (ch == 'u') { + if (json_append_unicode_escape(json) == 0) + continue; + + json_set_token_error(token, json, + "invalid unicode escape code"); + return; + } + if (!ch) { + json_set_token_error(token, json, "invalid escape code"); + return; + } + + /* Skip '\' */ + json->ptr++; + } + /* Append normal character or translated single character + * Unicode escapes are handled above */ + strbuf_append_char_unsafe(json->tmp, ch); + json->ptr++; + } + json->ptr++; /* Eat final quote (") */ + + strbuf_ensure_null(json->tmp); + + token->type = T_STRING; + token->value.string = strbuf_string(json->tmp, &token->string_len); +} + +/* JSON numbers should take the following form: + * -?(0|[1-9]|[1-9][0-9]+)(.[0-9]+)?([eE][-+]?[0-9]+)? + * + * json_next_number_token() uses strtod() which allows other forms: + * - numbers starting with '+' + * - NaN, -NaN, infinity, -infinity + * - hexadecimal numbers + * - numbers with leading zeros + * + * json_is_invalid_number() detects "numbers" which may pass strtod()'s + * error checking, but should not be allowed with strict JSON. + * + * json_is_invalid_number() may pass numbers which cause strtod() + * to generate an error. + */ +static int json_is_invalid_number(json_parse_t *json) +{ + const char *p = json->ptr; + + /* Reject numbers starting with + */ + if (*p == '+') + return 1; + + /* Skip minus sign if it exists */ + if (*p == '-') + p++; + + /* Reject numbers starting with 0x, or leading zeros */ + if (*p == '0') { + int ch2 = *(p + 1); + + if ((ch2 | 0x20) == 'x' || /* Hex */ + ('0' <= ch2 && ch2 <= '9')) /* Leading zero */ + return 1; + + return 0; + } else if (*p <= '9') { + return 0; /* Ordinary number */ + } + + /* Reject inf/nan */ + if (!strncasecmp(p, "inf", 3)) + return 1; + if (!strncasecmp(p, "nan", 3)) + return 1; + + /* Pass all other numbers which may still be invalid, but + * strtod() will catch them. */ + return 0; +} + +static void json_next_number_token(json_parse_t *json, json_token_t *token) +{ + char *endptr; + + token->type = T_NUMBER; + token->value.number = fpconv_strtod(json->ptr, &endptr); + if (json->ptr == endptr) + json_set_token_error(token, json, "invalid number"); + else + json->ptr = endptr; /* Skip the processed number */ + + return; +} + +/* Fills in the token struct. + * T_STRING will return a pointer to the json_parse_t temporary string + * T_ERROR will leave the json->ptr pointer at the error. + */ +static void json_next_token(json_parse_t *json, json_token_t *token) +{ + const json_token_type_t *ch2token = json->cfg->ch2token; + int ch; + + /* Eat whitespace. */ + while (1) { + ch = (unsigned char)*(json->ptr); + token->type = ch2token[ch]; + if (token->type != T_WHITESPACE) + break; + json->ptr++; + } + + /* Store location of new token. Required when throwing errors + * for unexpected tokens (syntax errors). 
*/ + token->index = json->ptr - json->data; + + /* Don't advance the pointer for an error or the end */ + if (token->type == T_ERROR) { + json_set_token_error(token, json, "invalid token"); + return; + } + + if (token->type == T_END) { + return; + } + + /* Found a known single character token, advance index and return */ + if (token->type != T_UNKNOWN) { + json->ptr++; + return; + } + + /* Process characters which triggered T_UNKNOWN + * + * Must use strncmp() to match the front of the JSON string. + * JSON identifier must be lowercase. + * When strict_numbers if disabled, either case is allowed for + * Infinity/NaN (since we are no longer following the spec..) */ + if (ch == '"') { + json_next_string_token(json, token); + return; + } else if (ch == '-' || ('0' <= ch && ch <= '9')) { + if (!json->cfg->decode_invalid_numbers && json_is_invalid_number(json)) { + json_set_token_error(token, json, "invalid number"); + return; + } + json_next_number_token(json, token); + return; + } else if (!strncmp(json->ptr, "true", 4)) { + token->type = T_BOOLEAN; + token->value.boolean = 1; + json->ptr += 4; + return; + } else if (!strncmp(json->ptr, "false", 5)) { + token->type = T_BOOLEAN; + token->value.boolean = 0; + json->ptr += 5; + return; + } else if (!strncmp(json->ptr, "null", 4)) { + token->type = T_NULL; + json->ptr += 4; + return; + } else if (json->cfg->decode_invalid_numbers && + json_is_invalid_number(json)) { + /* When decode_invalid_numbers is enabled, only attempt to process + * numbers we know are invalid JSON (Inf, NaN, hex) + * This is required to generate an appropriate token error, + * otherwise all bad tokens will register as "invalid number" + */ + json_next_number_token(json, token); + return; + } + + /* Token starts with t/f/n but isn't recognised above. */ + json_set_token_error(token, json, "invalid token"); +} + +/* This function does not return. + * DO NOT CALL WITH DYNAMIC MEMORY ALLOCATED. + * The only supported exception is the temporary parser string + * json->tmp struct. + * json and token should exist on the stack somewhere. 
+ * luaL_error() will long_jmp and release the stack */ +static void json_throw_parse_error(lua_State *l, json_parse_t *json, + const char *exp, json_token_t *token) +{ + const char *found; + + strbuf_free(json->tmp); + + if (token->type == T_ERROR) + found = token->value.string; + else + found = json_token_type_name[token->type]; + + /* Note: token->index is 0 based, display starting from 1 */ + luaL_error(l, "Expected %s but found %s at character %d", + exp, found, token->index + 1); +} + +static inline void json_decode_ascend(json_parse_t *json) +{ + json->current_depth--; +} + +static void json_decode_descend(lua_State *l, json_parse_t *json, int slots) +{ + json->current_depth++; + + if (json->current_depth <= json->cfg->decode_max_depth && + lua_checkstack(l, slots)) { + return; + } + + strbuf_free(json->tmp); + luaL_error(l, "Found too many nested data structures (%d) at character %d", + json->current_depth, json->ptr - json->data); +} + +static void json_parse_object_context(lua_State *l, json_parse_t *json) +{ + json_token_t token; + + /* 3 slots required: + * .., table, key, value */ + json_decode_descend(l, json, 3); + + lua_newtable(l); + + json_next_token(json, &token); + + /* Handle empty objects */ + if (token.type == T_OBJ_END) { + json_decode_ascend(json); + return; + } + + while (1) { + if (token.type != T_STRING) + json_throw_parse_error(l, json, "object key string", &token); + + /* Push key */ + lua_pushlstring(l, token.value.string, token.string_len); + + json_next_token(json, &token); + if (token.type != T_COLON) + json_throw_parse_error(l, json, "colon", &token); + + /* Fetch value */ + json_next_token(json, &token); + json_process_value(l, json, &token); + + /* Set key = value */ + lua_rawset(l, -3); + + json_next_token(json, &token); + + if (token.type == T_OBJ_END) { + json_decode_ascend(json); + return; + } + + if (token.type != T_COMMA) + json_throw_parse_error(l, json, "comma or object end", &token); + + json_next_token(json, &token); + } +} + +/* Handle the array context */ +static void json_parse_array_context(lua_State *l, json_parse_t *json) +{ + json_token_t token; + int i; + + /* 2 slots required: + * .., table, value */ + json_decode_descend(l, json, 2); + + lua_newtable(l); + + json_next_token(json, &token); + + /* Handle empty arrays */ + if (token.type == T_ARR_END) { + json_decode_ascend(json); + return; + } + + for (i = 1; ; i++) { + json_process_value(l, json, &token); + lua_rawseti(l, -2, i); /* arr[i] = value */ + + json_next_token(json, &token); + + if (token.type == T_ARR_END) { + json_decode_ascend(json); + return; + } + + if (token.type != T_COMMA) + json_throw_parse_error(l, json, "comma or array end", &token); + + json_next_token(json, &token); + } +} + +/* Handle the "value" context */ +static void json_process_value(lua_State *l, json_parse_t *json, + json_token_t *token) +{ + switch (token->type) { + case T_STRING: + lua_pushlstring(l, token->value.string, token->string_len); + break;; + case T_NUMBER: + lua_pushnumber(l, token->value.number); + break;; + case T_BOOLEAN: + lua_pushboolean(l, token->value.boolean); + break;; + case T_OBJ_BEGIN: + json_parse_object_context(l, json); + break;; + case T_ARR_BEGIN: + json_parse_array_context(l, json); + break;; + case T_NULL: + /* In Lua, setting "t[k] = nil" will delete k from the table. 
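The parser above returns JSON null as a NULL lightuserdata (cjson.null) so it survives inside Lua tables. A minimal host-side sketch of that behaviour, assuming the host links lua_cjson.c against Lua 5.1; error handling (lua_pcall) and state setup are omitted for brevity, and the function name is illustrative:

#include <stdio.h>
#include "lua.h"
#include "lauxlib.h"

int luaopen_cjson(lua_State *L);   /* provided by lua_cjson.c */

static void decode_null_demo(lua_State *L) {
    luaopen_cjson(L);                       /* pushes the cjson module table */
    lua_getfield(L, -1, "decode");
    lua_pushliteral(L, "{\"a\": null}");
    lua_call(L, 1, 1);                      /* -> decoded table */
    lua_getfield(L, -1, "a");
    printf("a is cjson.null? %d\n",
           lua_islightuserdata(L, -1) && lua_touserdata(L, -1) == NULL);
    lua_pop(L, 3);                          /* field, table, module */
}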
+ * Hence a NULL pointer lightuserdata object is used instead */ + lua_pushlightuserdata(l, NULL); + break;; + default: + json_throw_parse_error(l, json, "value", token); + } +} + +static int json_decode(lua_State *l) +{ + json_parse_t json; + json_token_t token; + size_t json_len; + + luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); + + json.cfg = json_fetch_config(l); + json.data = luaL_checklstring(l, 1, &json_len); + json.current_depth = 0; + json.ptr = json.data; + + /* Detect Unicode other than UTF-8 (see RFC 4627, Sec 3) + * + * CJSON can support any simple data type, hence only the first + * character is guaranteed to be ASCII (at worst: '"'). This is + * still enough to detect whether the wrong encoding is in use. */ + if (json_len >= 2 && (!json.data[0] || !json.data[1])) + luaL_error(l, "JSON parser does not support UTF-16 or UTF-32"); + + /* Ensure the temporary buffer can hold the entire string. + * This means we no longer need to do length checks since the decoded + * string must be smaller than the entire json string */ + json.tmp = strbuf_new(json_len); + + json_next_token(&json, &token); + json_process_value(l, &json, &token); + + /* Ensure there is no more input left */ + json_next_token(&json, &token); + + if (token.type != T_END) + json_throw_parse_error(l, &json, "the end", &token); + + strbuf_free(json.tmp); + + return 1; +} + +/* ===== INITIALISATION ===== */ + +#if !defined(LUA_VERSION_NUM) || LUA_VERSION_NUM < 502 +/* Compatibility for Lua 5.1. + * + * luaL_setfuncs() is used to create a module table where the functions have + * json_config_t as their first upvalue. Code borrowed from Lua 5.2 source. */ +static void luaL_setfuncs (lua_State *l, const luaL_Reg *reg, int nup) +{ + int i; + + luaL_checkstack(l, nup, "too many upvalues"); + for (; reg->name != NULL; reg++) { /* fill the table with given functions */ + for (i = 0; i < nup; i++) /* copy upvalues to the top */ + lua_pushvalue(l, -nup); + lua_pushcclosure(l, reg->func, nup); /* closure with those upvalues */ + lua_setfield(l, -(nup + 2), reg->name); + } + lua_pop(l, nup); /* remove upvalues */ +} +#endif + +/* Call target function in protected mode with all supplied args. + * Assumes target function only returns a single non-nil value. 
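The encoding guard in json_decode() above works because a syntactically valid JSON text starts with an ASCII character, so a UTF-16 or UTF-32 document necessarily has a zero byte among its first two bytes, while UTF-8 never does. A tiny illustration of the same test (the helper name is an assumption, not part of the patch):

#include <stddef.h>

static int looks_like_utf16_or_32(const char *data, size_t len) {
    return len >= 2 && (!data[0] || !data[1]);
}
/* "\"a\"" in UTF-8                            -> 0 (accepted)
 * the same text in UTF-16LE: 22 00 61 00 22 00 -> 1 (rejected) */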
+ * Convert and return thrown errors as: nil, "error message" */ +static int json_protect_conversion(lua_State *l) +{ + int err; + + /* Deliberately throw an error for invalid arguments */ + luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); + + /* pcall() the function stored as upvalue(1) */ + lua_pushvalue(l, lua_upvalueindex(1)); + lua_insert(l, 1); + err = lua_pcall(l, 1, 1, 0); + if (!err) + return 1; + + if (err == LUA_ERRRUN) { + lua_pushnil(l); + lua_insert(l, -2); + return 2; + } + + /* Since we are not using a custom error handler, the only remaining + * errors are memory related */ + return luaL_error(l, "Memory allocation error in CJSON protected call"); +} + +/* Return cjson module table */ +static int lua_cjson_new(lua_State *l) +{ + luaL_Reg reg[] = { + { "encode", json_encode }, + { "decode", json_decode }, + { "encode_sparse_array", json_cfg_encode_sparse_array }, + { "encode_max_depth", json_cfg_encode_max_depth }, + { "decode_max_depth", json_cfg_decode_max_depth }, + { "encode_number_precision", json_cfg_encode_number_precision }, + { "encode_keep_buffer", json_cfg_encode_keep_buffer }, + { "encode_invalid_numbers", json_cfg_encode_invalid_numbers }, + { "decode_invalid_numbers", json_cfg_decode_invalid_numbers }, + { "new", lua_cjson_new }, + { NULL, NULL } + }; + + /* Initialise number conversions */ + fpconv_init(); + + /* cjson module table */ + lua_newtable(l); + + /* Register functions with config data as upvalue */ + json_create_config(l); + luaL_setfuncs(l, reg, 1); + + /* Set cjson.null */ + lua_pushlightuserdata(l, NULL); + lua_setfield(l, -2, "null"); + + /* Set module name / version fields */ + lua_pushliteral(l, CJSON_MODNAME); + lua_setfield(l, -2, "_NAME"); + lua_pushliteral(l, CJSON_VERSION); + lua_setfield(l, -2, "_VERSION"); + + return 1; +} + +/* Return cjson.safe module table */ +static int lua_cjson_safe_new(lua_State *l) +{ + const char *func[] = { "decode", "encode", NULL }; + int i; + + lua_cjson_new(l); + + /* Fix new() method */ + lua_pushcfunction(l, lua_cjson_safe_new); + lua_setfield(l, -2, "new"); + + for (i = 0; func[i]; i++) { + lua_getfield(l, -1, func[i]); + lua_pushcclosure(l, json_protect_conversion, 1); + lua_setfield(l, -2, func[i]); + } + + return 1; +} + +int luaopen_cjson(lua_State *l) +{ + lua_cjson_new(l); + +#ifdef ENABLE_CJSON_GLOBAL + /* Register a global "cjson" table. */ + lua_pushvalue(l, -1); + lua_setglobal(l, CJSON_MODNAME); +#endif + + /* Return cjson table */ + return 1; +} + +int luaopen_cjson_safe(lua_State *l) +{ + lua_cjson_safe_new(l); + + /* Return cjson.safe table */ + return 1; +} + +/* vi:ai et sw=4 ts=4: + */ diff --git a/deps/lua/src/lua_cmsgpack.c b/deps/lua/src/lua_cmsgpack.c new file mode 100644 index 0000000..5f8929d --- /dev/null +++ b/deps/lua/src/lua_cmsgpack.c @@ -0,0 +1,981 @@ +#include +#include +#include +#include +#include + +#include "lua.h" +#include "lauxlib.h" + +#define LUACMSGPACK_NAME "cmsgpack" +#define LUACMSGPACK_SAFE_NAME "cmsgpack_safe" +#define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0" +#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo" +#define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua" + +/* Allows a preprocessor directive to override MAX_NESTING */ +#ifndef LUACMSGPACK_MAX_NESTING + #define LUACMSGPACK_MAX_NESTING 16 /* Max tables nesting. 
*/ +#endif + +/* Check if float or double can be an integer without loss of precision */ +#define IS_INT_TYPE_EQUIVALENT(x, T) (!isinf(x) && (T)(x) == (x)) + +#define IS_INT64_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int64_t) +#define IS_INT_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int) + +/* If size of pointer is equal to a 4 byte integer, we're on 32 bits. */ +#if UINTPTR_MAX == UINT_MAX + #define BITS_32 1 +#else + #define BITS_32 0 +#endif + +#if BITS_32 + #define lua_pushunsigned(L, n) lua_pushnumber(L, n) +#else + #define lua_pushunsigned(L, n) lua_pushinteger(L, n) +#endif + +/* ============================================================================= + * MessagePack implementation and bindings for Lua 5.1/5.2. + * Copyright(C) 2012 Salvatore Sanfilippo + * + * http://github.com/antirez/lua-cmsgpack + * + * For MessagePack specification check the following web site: + * http://wiki.msgpack.org/display/MSGPACK/Format+specification + * + * See Copyright Notice at the end of this file. + * + * CHANGELOG: + * 19-Feb-2012 (ver 0.1.0): Initial release. + * 20-Feb-2012 (ver 0.2.0): Tables encoding improved. + * 20-Feb-2012 (ver 0.2.1): Minor bug fixing. + * 20-Feb-2012 (ver 0.3.0): Module renamed lua-cmsgpack (was lua-msgpack). + * 04-Apr-2014 (ver 0.3.1): Lua 5.2 support and minor bug fix. + * 07-Apr-2014 (ver 0.4.0): Multiple pack/unpack, lua allocator, efficiency. + * ========================================================================== */ + +/* -------------------------- Endian conversion -------------------------------- + * We use it only for floats and doubles, all the other conversions performed + * in an endian independent fashion. So the only thing we need is a function + * that swaps a binary string if arch is little endian (and left it untouched + * otherwise). */ + +/* Reverse memory bytes if arch is little endian. Given the conceptual + * simplicity of the Lua build system we prefer check for endianess at runtime. + * The performance difference should be acceptable. */ +void memrevifle(void *ptr, size_t len) { + unsigned char *p = (unsigned char *)ptr, + *e = (unsigned char *)p+len-1, + aux; + int test = 1; + unsigned char *testp = (unsigned char*) &test; + + if (testp[0] == 0) return; /* Big endian, nothing to do. */ + len /= 2; + while(len--) { + aux = *p; + *p = *e; + *e = aux; + p++; + e--; + } +} + +/* ---------------------------- String buffer ---------------------------------- + * This is a simple implementation of string buffers. The only operation + * supported is creating empty buffers and appending bytes to it. + * The string buffer uses 2x preallocation on every realloc for O(N) append + * behavior. 
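memrevifle() above decides at run time whether to byte-swap by inspecting the representation of the integer 1. The same probe in isolation, for illustration:

#include <stdio.h>

int main(void) {
    int test = 1;
    unsigned char *p = (unsigned char *)&test;
    printf("%s-endian\n", p[0] ? "little" : "big");
    return 0;
}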
*/ + +typedef struct mp_buf { + unsigned char *b; + size_t len, free; +} mp_buf; + +void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { + void *(*local_realloc) (void *, void *, size_t osize, size_t nsize) = NULL; + void *ud; + + local_realloc = lua_getallocf(L, &ud); + + return local_realloc(ud, target, osize, nsize); +} + +mp_buf *mp_buf_new(lua_State *L) { + mp_buf *buf = NULL; + + /* Old size = 0; new size = sizeof(*buf) */ + buf = (mp_buf*)mp_realloc(L, NULL, 0, sizeof(*buf)); + + buf->b = NULL; + buf->len = buf->free = 0; + return buf; +} + +void mp_buf_append(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { + if (buf->free < len) { + size_t newsize = buf->len+len; + if (newsize < buf->len || newsize >= SIZE_MAX/2) abort(); + newsize *= 2; + + buf->b = (unsigned char*)mp_realloc(L, buf->b, buf->len + buf->free, newsize); + buf->free = newsize - buf->len; + } + memcpy(buf->b+buf->len,s,len); + buf->len += len; + buf->free -= len; +} + +void mp_buf_free(lua_State *L, mp_buf *buf) { + mp_realloc(L, buf->b, buf->len + buf->free, 0); /* realloc to 0 = free */ + mp_realloc(L, buf, sizeof(*buf), 0); +} + +/* ---------------------------- String cursor ---------------------------------- + * This simple data structure is used for parsing. Basically you create a cursor + * using a string pointer and a length, then it is possible to access the + * current string position with cursor->p, check the remaining length + * in cursor->left, and finally consume more string using + * mp_cur_consume(cursor,len), to advance 'p' and subtract 'left'. + * An additional field cursor->error is set to zero on initialization and can + * be used to report errors. */ + +#define MP_CUR_ERROR_NONE 0 +#define MP_CUR_ERROR_EOF 1 /* Not enough data to complete operation. */ +#define MP_CUR_ERROR_BADFMT 2 /* Bad data format */ + +typedef struct mp_cur { + const unsigned char *p; + size_t left; + int err; +} mp_cur; + +void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { + cursor->p = s; + cursor->left = len; + cursor->err = MP_CUR_ERROR_NONE; +} + +#define mp_cur_consume(_c,_len) do { _c->p += _len; _c->left -= _len; } while(0) + +/* When there is not enough room we set an error in the cursor and return. This + * is very common across the code so we have a macro to make the code look + * a bit simpler. */ +#define mp_cur_need(_c,_len) do { \ + if (_c->left < _len) { \ + _c->err = MP_CUR_ERROR_EOF; \ + return; \ + } \ +} while(0) + +/* ------------------------- Low level MP encoding -------------------------- */ + +void mp_encode_bytes(lua_State *L, mp_buf *buf, const unsigned char *s, size_t len) { + unsigned char hdr[5]; + size_t hdrlen; + + if (len < 32) { + hdr[0] = 0xa0 | (len&0xff); /* fix raw */ + hdrlen = 1; + } else if (len <= 0xff) { + hdr[0] = 0xd9; + hdr[1] = len; + hdrlen = 2; + } else if (len <= 0xffff) { + hdr[0] = 0xda; + hdr[1] = (len&0xff00)>>8; + hdr[2] = len&0xff; + hdrlen = 3; + } else { + hdr[0] = 0xdb; + hdr[1] = (len&0xff000000)>>24; + hdr[2] = (len&0xff0000)>>16; + hdr[3] = (len&0xff00)>>8; + hdr[4] = len&0xff; + hdrlen = 5; + } + mp_buf_append(L,buf,hdr,hdrlen); + mp_buf_append(L,buf,s,len); +} + +/* we assume IEEE 754 internal format for single and double precision floats. 
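A worked example of the header layout produced by mp_encode_bytes() above: a 5-byte string fits the one-byte fixraw header (0xa0|5 = 0xa5), while 300 bytes need the 0xda form followed by the big-endian 16-bit length. Standalone sketch of the latter case:

#include <stdio.h>

int main(void) {
    size_t len = 300;                 /* too long for fixraw or 0xd9 */
    unsigned char hdr[3];
    hdr[0] = 0xda;                    /* raw 16 */
    hdr[1] = (len & 0xff00) >> 8;
    hdr[2] = len & 0xff;
    printf("%02x %02x %02x\n", hdr[0], hdr[1], hdr[2]);   /* da 01 2c */
    return 0;
}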
*/ +void mp_encode_double(lua_State *L, mp_buf *buf, double d) { + unsigned char b[9]; + float f = d; + + assert(sizeof(f) == 4 && sizeof(d) == 8); + if (d == (double)f) { + b[0] = 0xca; /* float IEEE 754 */ + memcpy(b+1,&f,4); + memrevifle(b+1,4); + mp_buf_append(L,buf,b,5); + } else if (sizeof(d) == 8) { + b[0] = 0xcb; /* double IEEE 754 */ + memcpy(b+1,&d,8); + memrevifle(b+1,8); + mp_buf_append(L,buf,b,9); + } +} + +void mp_encode_int(lua_State *L, mp_buf *buf, int64_t n) { + unsigned char b[9]; + size_t enclen; + + if (n >= 0) { + if (n <= 127) { + b[0] = n & 0x7f; /* positive fixnum */ + enclen = 1; + } else if (n <= 0xff) { + b[0] = 0xcc; /* uint 8 */ + b[1] = n & 0xff; + enclen = 2; + } else if (n <= 0xffff) { + b[0] = 0xcd; /* uint 16 */ + b[1] = (n & 0xff00) >> 8; + b[2] = n & 0xff; + enclen = 3; + } else if (n <= 0xffffffffLL) { + b[0] = 0xce; /* uint 32 */ + b[1] = (n & 0xff000000) >> 24; + b[2] = (n & 0xff0000) >> 16; + b[3] = (n & 0xff00) >> 8; + b[4] = n & 0xff; + enclen = 5; + } else { + b[0] = 0xcf; /* uint 64 */ + b[1] = (n & 0xff00000000000000LL) >> 56; + b[2] = (n & 0xff000000000000LL) >> 48; + b[3] = (n & 0xff0000000000LL) >> 40; + b[4] = (n & 0xff00000000LL) >> 32; + b[5] = (n & 0xff000000) >> 24; + b[6] = (n & 0xff0000) >> 16; + b[7] = (n & 0xff00) >> 8; + b[8] = n & 0xff; + enclen = 9; + } + } else { + if (n >= -32) { + b[0] = ((signed char)n); /* negative fixnum */ + enclen = 1; + } else if (n >= -128) { + b[0] = 0xd0; /* int 8 */ + b[1] = n & 0xff; + enclen = 2; + } else if (n >= -32768) { + b[0] = 0xd1; /* int 16 */ + b[1] = (n & 0xff00) >> 8; + b[2] = n & 0xff; + enclen = 3; + } else if (n >= -2147483648LL) { + b[0] = 0xd2; /* int 32 */ + b[1] = (n & 0xff000000) >> 24; + b[2] = (n & 0xff0000) >> 16; + b[3] = (n & 0xff00) >> 8; + b[4] = n & 0xff; + enclen = 5; + } else { + b[0] = 0xd3; /* int 64 */ + b[1] = (n & 0xff00000000000000LL) >> 56; + b[2] = (n & 0xff000000000000LL) >> 48; + b[3] = (n & 0xff0000000000LL) >> 40; + b[4] = (n & 0xff00000000LL) >> 32; + b[5] = (n & 0xff000000) >> 24; + b[6] = (n & 0xff0000) >> 16; + b[7] = (n & 0xff00) >> 8; + b[8] = n & 0xff; + enclen = 9; + } + } + mp_buf_append(L,buf,b,enclen); +} + +void mp_encode_array(lua_State *L, mp_buf *buf, uint64_t n) { + unsigned char b[5]; + size_t enclen; + + if (n <= 15) { + b[0] = 0x90 | (n & 0xf); /* fix array */ + enclen = 1; + } else if (n <= 65535) { + b[0] = 0xdc; /* array 16 */ + b[1] = (n & 0xff00) >> 8; + b[2] = n & 0xff; + enclen = 3; + } else { + b[0] = 0xdd; /* array 32 */ + b[1] = (n & 0xff000000) >> 24; + b[2] = (n & 0xff0000) >> 16; + b[3] = (n & 0xff00) >> 8; + b[4] = n & 0xff; + enclen = 5; + } + mp_buf_append(L,buf,b,enclen); +} + +void mp_encode_map(lua_State *L, mp_buf *buf, uint64_t n) { + unsigned char b[5]; + int enclen; + + if (n <= 15) { + b[0] = 0x80 | (n & 0xf); /* fix map */ + enclen = 1; + } else if (n <= 65535) { + b[0] = 0xde; /* map 16 */ + b[1] = (n & 0xff00) >> 8; + b[2] = n & 0xff; + enclen = 3; + } else { + b[0] = 0xdf; /* map 32 */ + b[1] = (n & 0xff000000) >> 24; + b[2] = (n & 0xff0000) >> 16; + b[3] = (n & 0xff00) >> 8; + b[4] = n & 0xff; + enclen = 5; + } + mp_buf_append(L,buf,b,enclen); +} + +/* --------------------------- Lua types encoding --------------------------- */ + +void mp_encode_lua_string(lua_State *L, mp_buf *buf) { + size_t len; + const char *s; + + s = lua_tolstring(L,-1,&len); + mp_encode_bytes(L,buf,(const unsigned char*)s,len); +} + +void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { + unsigned char b = lua_toboolean(L,-1) ? 
0xc3 : 0xc2; + mp_buf_append(L,buf,&b,1); +} + +/* Lua 5.3 has a built in 64-bit integer type */ +void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { +#if (LUA_VERSION_NUM < 503) && BITS_32 + lua_Number i = lua_tonumber(L,-1); +#else + lua_Integer i = lua_tointeger(L,-1); +#endif + mp_encode_int(L, buf, (int64_t)i); +} + +/* Lua 5.2 and lower only has 64-bit doubles, so we need to + * detect if the double may be representable as an int + * for Lua < 5.3 */ +void mp_encode_lua_number(lua_State *L, mp_buf *buf) { + lua_Number n = lua_tonumber(L,-1); + + if (IS_INT64_EQUIVALENT(n)) { + mp_encode_lua_integer(L, buf); + } else { + mp_encode_double(L,buf,(double)n); + } +} + +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); + +/* Convert a lua table into a message pack list. */ +void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { +#if LUA_VERSION_NUM < 502 + size_t len = lua_objlen(L,-1), j; +#else + size_t len = lua_rawlen(L,-1), j; +#endif + + mp_encode_array(L,buf,len); + luaL_checkstack(L, 1, "in function mp_encode_lua_table_as_array"); + for (j = 1; j <= len; j++) { + lua_pushnumber(L,j); + lua_gettable(L,-2); + mp_encode_lua_type(L,buf,level+1); + } +} + +/* Convert a lua table into a message pack key-value map. */ +void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { + size_t len = 0; + + /* First step: count keys into table. No other way to do it with the + * Lua API, we need to iterate a first time. Note that an alternative + * would be to do a single run, and then hack the buffer to insert the + * map opcodes for message pack. Too hackish for this lib. */ + luaL_checkstack(L, 3, "in function mp_encode_lua_table_as_map"); + lua_pushnil(L); + while(lua_next(L,-2)) { + lua_pop(L,1); /* remove value, keep key for next iteration. */ + len++; + } + + /* Step two: actually encoding of the map. */ + mp_encode_map(L,buf,len); + lua_pushnil(L); + while(lua_next(L,-2)) { + /* Stack: ... key value */ + lua_pushvalue(L,-2); /* Stack: ... key value key */ + mp_encode_lua_type(L,buf,level+1); /* encode key */ + mp_encode_lua_type(L,buf,level+1); /* encode val */ + } +} + +/* Returns true if the Lua table on top of the stack is exclusively composed + * of keys from numerical keys from 1 up to N, with N being the total number + * of elements, without any hole in the middle. */ +int table_is_an_array(lua_State *L) { + int count = 0, max = 0; +#if LUA_VERSION_NUM < 503 + lua_Number n; +#else + lua_Integer n; +#endif + + /* Stack top on function entry */ + int stacktop; + + stacktop = lua_gettop(L); + + luaL_checkstack(L, 2, "in function table_is_an_array"); + lua_pushnil(L); + while(lua_next(L,-2)) { + /* Stack: ... key value */ + lua_pop(L,1); /* Stack: ... key */ + /* The <= 0 check is valid here because we're comparing indexes. */ +#if LUA_VERSION_NUM < 503 + if ((LUA_TNUMBER != lua_type(L,-1)) || (n = lua_tonumber(L, -1)) <= 0 || + !IS_INT_EQUIVALENT(n)) +#else + if (!lua_isinteger(L,-1) || (n = lua_tointeger(L, -1)) <= 0) +#endif + { + lua_settop(L, stacktop); + return 0; + } + max = (n > max ? n : max); + count++; + } + /* We have the total number of elements in "count". Also we have + * the max index encountered in "max". We can't reach this code + * if there are indexes <= 0. If you also note that there can not be + * repeated keys into a table, you have that if max==count you are sure + * that there are all the keys form 1 to count (both included). 
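table_is_an_array() above accepts a table only when its keys are exactly the integers 1..N, which it verifies by comparing the element count with the maximum key seen. A standalone restatement of that final comparison over pre-collected integer keys (names and sample data are illustrative):

#include <stdio.h>

static int is_array_shape(const int *keys, int count) {
    int i, max = 0;
    for (i = 0; i < count; i++)
        if (keys[i] > max) max = keys[i];
    return max == count;              /* no holes, nothing but 1..N */
}

int main(void) {
    int dense[]  = { 1, 2, 3 };       /* {10,20,30}    -> MessagePack array */
    int sparse[] = { 1, 3 };          /* {10, nil, 30} -> MessagePack map   */
    printf("%d %d\n", is_array_shape(dense, 3), is_array_shape(sparse, 2));
    return 0;                         /* prints: 1 0 */
}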
*/ + lua_settop(L, stacktop); + return max == count; +} + +/* If the length operator returns non-zero, that is, there is at least + * an object at key '1', we serialize to message pack list. Otherwise + * we use a map. */ +void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { + if (table_is_an_array(L)) + mp_encode_lua_table_as_array(L,buf,level); + else + mp_encode_lua_table_as_map(L,buf,level); +} + +void mp_encode_lua_null(lua_State *L, mp_buf *buf) { + unsigned char b[1]; + + b[0] = 0xc0; + mp_buf_append(L,buf,b,1); +} + +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { + int t = lua_type(L,-1); + + /* Limit the encoding of nested tables to a specified maximum depth, so that + * we survive when called against circular references in tables. */ + if (t == LUA_TTABLE && level == LUACMSGPACK_MAX_NESTING) t = LUA_TNIL; + switch(t) { + case LUA_TSTRING: mp_encode_lua_string(L,buf); break; + case LUA_TBOOLEAN: mp_encode_lua_bool(L,buf); break; + case LUA_TNUMBER: + #if LUA_VERSION_NUM < 503 + mp_encode_lua_number(L,buf); break; + #else + if (lua_isinteger(L, -1)) { + mp_encode_lua_integer(L, buf); + } else { + mp_encode_lua_number(L, buf); + } + break; + #endif + case LUA_TTABLE: mp_encode_lua_table(L,buf,level); break; + default: mp_encode_lua_null(L,buf); break; + } + lua_pop(L,1); +} + +/* + * Packs all arguments as a stream for multiple upacking later. + * Returns error if no arguments provided. + */ +int mp_pack(lua_State *L) { + int nargs = lua_gettop(L); + int i; + mp_buf *buf; + + if (nargs == 0) + return luaL_argerror(L, 0, "MessagePack pack needs input."); + + if (!lua_checkstack(L, nargs)) + return luaL_argerror(L, 0, "Too many arguments for MessagePack pack."); + + buf = mp_buf_new(L); + for(i = 1; i <= nargs; i++) { + /* Copy argument i to top of stack for _encode processing; + * the encode function pops it from the stack when complete. */ + luaL_checkstack(L, 1, "in function mp_check"); + lua_pushvalue(L, i); + + mp_encode_lua_type(L,buf,0); + + lua_pushlstring(L,(char*)buf->b,buf->len); + + /* Reuse the buffer for the next operation by + * setting its free count to the total buffer size + * and the current position to zero. */ + buf->free += buf->len; + buf->len = 0; + } + mp_buf_free(L, buf); + + /* Concatenate all nargs buffers together */ + lua_concat(L, nargs); + return 1; +} + +/* ------------------------------- Decoding --------------------------------- */ + +void mp_decode_to_lua_type(lua_State *L, mp_cur *c); + +void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) { + assert(len <= UINT_MAX); + int index = 1; + + lua_newtable(L); + luaL_checkstack(L, 1, "in function mp_decode_to_lua_array"); + while(len--) { + lua_pushnumber(L,index++); + mp_decode_to_lua_type(L,c); + if (c->err) return; + lua_settable(L,-3); + } +} + +void mp_decode_to_lua_hash(lua_State *L, mp_cur *c, size_t len) { + assert(len <= UINT_MAX); + lua_newtable(L); + while(len--) { + mp_decode_to_lua_type(L,c); /* key */ + if (c->err) return; + mp_decode_to_lua_type(L,c); /* value */ + if (c->err) return; + lua_settable(L,-3); + } +} + +/* Decode a Message Pack raw object pointed by the string cursor 'c' to + * a Lua type, that is left as the only result on the stack. */ +void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { + mp_cur_need(c,1); + + /* If we return more than 18 elements, we must resize the stack to + * fit all our return values. 
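A worked example of the cursor-driven decoding in the uint16 case of mp_decode_to_lua_type(): the bytes cd 01 2c carry a 16-bit unsigned value, so the decoder calls mp_cur_need(c,3), reads the big-endian payload, consumes three bytes, and pushes 300. Standalone arithmetic only, for illustration:

#include <stdio.h>

int main(void) {
    const unsigned char buf[] = { 0xcd, 0x01, 0x2c };   /* uint 16, value 300 */
    unsigned value = (buf[1] << 8) | buf[2];            /* big-endian payload */
    printf("%u\n", value);                              /* 300 */
    return 0;
}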
But, there is no way to + * determine how many objects a msgpack will unpack to up front, so + * we request a +1 larger stack on each iteration (noop if stack is + * big enough, and when stack does require resize it doubles in size) */ + luaL_checkstack(L, 1, + "too many return values at once; " + "use unpack_one or unpack_limit instead."); + + switch(c->p[0]) { + case 0xcc: /* uint 8 */ + mp_cur_need(c,2); + lua_pushunsigned(L,c->p[1]); + mp_cur_consume(c,2); + break; + case 0xd0: /* int 8 */ + mp_cur_need(c,2); + lua_pushinteger(L,(signed char)c->p[1]); + mp_cur_consume(c,2); + break; + case 0xcd: /* uint 16 */ + mp_cur_need(c,3); + lua_pushunsigned(L, + (c->p[1] << 8) | + c->p[2]); + mp_cur_consume(c,3); + break; + case 0xd1: /* int 16 */ + mp_cur_need(c,3); + lua_pushinteger(L,(int16_t) + (c->p[1] << 8) | + c->p[2]); + mp_cur_consume(c,3); + break; + case 0xce: /* uint 32 */ + mp_cur_need(c,5); + lua_pushunsigned(L, + ((uint32_t)c->p[1] << 24) | + ((uint32_t)c->p[2] << 16) | + ((uint32_t)c->p[3] << 8) | + (uint32_t)c->p[4]); + mp_cur_consume(c,5); + break; + case 0xd2: /* int 32 */ + mp_cur_need(c,5); + lua_pushinteger(L, + ((int32_t)c->p[1] << 24) | + ((int32_t)c->p[2] << 16) | + ((int32_t)c->p[3] << 8) | + (int32_t)c->p[4]); + mp_cur_consume(c,5); + break; + case 0xcf: /* uint 64 */ + mp_cur_need(c,9); + lua_pushunsigned(L, + ((uint64_t)c->p[1] << 56) | + ((uint64_t)c->p[2] << 48) | + ((uint64_t)c->p[3] << 40) | + ((uint64_t)c->p[4] << 32) | + ((uint64_t)c->p[5] << 24) | + ((uint64_t)c->p[6] << 16) | + ((uint64_t)c->p[7] << 8) | + (uint64_t)c->p[8]); + mp_cur_consume(c,9); + break; + case 0xd3: /* int 64 */ + mp_cur_need(c,9); +#if LUA_VERSION_NUM < 503 + lua_pushnumber(L, +#else + lua_pushinteger(L, +#endif + ((int64_t)c->p[1] << 56) | + ((int64_t)c->p[2] << 48) | + ((int64_t)c->p[3] << 40) | + ((int64_t)c->p[4] << 32) | + ((int64_t)c->p[5] << 24) | + ((int64_t)c->p[6] << 16) | + ((int64_t)c->p[7] << 8) | + (int64_t)c->p[8]); + mp_cur_consume(c,9); + break; + case 0xc0: /* nil */ + lua_pushnil(L); + mp_cur_consume(c,1); + break; + case 0xc3: /* true */ + lua_pushboolean(L,1); + mp_cur_consume(c,1); + break; + case 0xc2: /* false */ + lua_pushboolean(L,0); + mp_cur_consume(c,1); + break; + case 0xca: /* float */ + mp_cur_need(c,5); + assert(sizeof(float) == 4); + { + float f; + memcpy(&f,c->p+1,4); + memrevifle(&f,4); + lua_pushnumber(L,f); + mp_cur_consume(c,5); + } + break; + case 0xcb: /* double */ + mp_cur_need(c,9); + assert(sizeof(double) == 8); + { + double d; + memcpy(&d,c->p+1,8); + memrevifle(&d,8); + lua_pushnumber(L,d); + mp_cur_consume(c,9); + } + break; + case 0xd9: /* raw 8 */ + mp_cur_need(c,2); + { + size_t l = c->p[1]; + mp_cur_need(c,2+l); + lua_pushlstring(L,(char*)c->p+2,l); + mp_cur_consume(c,2+l); + } + break; + case 0xda: /* raw 16 */ + mp_cur_need(c,3); + { + size_t l = (c->p[1] << 8) | c->p[2]; + mp_cur_need(c,3+l); + lua_pushlstring(L,(char*)c->p+3,l); + mp_cur_consume(c,3+l); + } + break; + case 0xdb: /* raw 32 */ + mp_cur_need(c,5); + { + size_t l = ((size_t)c->p[1] << 24) | + ((size_t)c->p[2] << 16) | + ((size_t)c->p[3] << 8) | + (size_t)c->p[4]; + mp_cur_consume(c,5); + mp_cur_need(c,l); + lua_pushlstring(L,(char*)c->p,l); + mp_cur_consume(c,l); + } + break; + case 0xdc: /* array 16 */ + mp_cur_need(c,3); + { + size_t l = (c->p[1] << 8) | c->p[2]; + mp_cur_consume(c,3); + mp_decode_to_lua_array(L,c,l); + } + break; + case 0xdd: /* array 32 */ + mp_cur_need(c,5); + { + size_t l = ((size_t)c->p[1] << 24) | + ((size_t)c->p[2] << 16) | + ((size_t)c->p[3] << 
8) |
+ (size_t)c->p[4];
+ mp_cur_consume(c,5);
+ mp_decode_to_lua_array(L,c,l);
+ }
+ break;
+ case 0xde: /* map 16 */
+ mp_cur_need(c,3);
+ {
+ size_t l = (c->p[1] << 8) | c->p[2];
+ mp_cur_consume(c,3);
+ mp_decode_to_lua_hash(L,c,l);
+ }
+ break;
+ case 0xdf: /* map 32 */
+ mp_cur_need(c,5);
+ {
+ size_t l = ((size_t)c->p[1] << 24) |
+ ((size_t)c->p[2] << 16) |
+ ((size_t)c->p[3] << 8) |
+ (size_t)c->p[4];
+ mp_cur_consume(c,5);
+ mp_decode_to_lua_hash(L,c,l);
+ }
+ break;
+ default: /* types that can't be identified by first byte value. */
+ if ((c->p[0] & 0x80) == 0) { /* positive fixnum */
+ lua_pushunsigned(L,c->p[0]);
+ mp_cur_consume(c,1);
+ } else if ((c->p[0] & 0xe0) == 0xe0) { /* negative fixnum */
+ lua_pushinteger(L,(signed char)c->p[0]);
+ mp_cur_consume(c,1);
+ } else if ((c->p[0] & 0xe0) == 0xa0) { /* fix raw */
+ size_t l = c->p[0] & 0x1f;
+ mp_cur_need(c,1+l);
+ lua_pushlstring(L,(char*)c->p+1,l);
+ mp_cur_consume(c,1+l);
+ } else if ((c->p[0] & 0xf0) == 0x90) { /* fix array */
+ size_t l = c->p[0] & 0xf;
+ mp_cur_consume(c,1);
+ mp_decode_to_lua_array(L,c,l);
+ } else if ((c->p[0] & 0xf0) == 0x80) { /* fix map */
+ size_t l = c->p[0] & 0xf;
+ mp_cur_consume(c,1);
+ mp_decode_to_lua_hash(L,c,l);
+ } else {
+ c->err = MP_CUR_ERROR_BADFMT;
+ }
+ }
+}
+
+int mp_unpack_full(lua_State *L, lua_Integer limit, lua_Integer offset) {
+ size_t len;
+ const char *s;
+ mp_cur c;
+ int cnt; /* Number of objects unpacked */
+ int decode_all = (!limit && !offset);
+
+ s = luaL_checklstring(L,1,&len); /* if no match, exits */
+
+ if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */
+ return luaL_error(L,
+ "Invalid request to unpack with offset of %d and limit of %d.",
+ (int) offset, (int) limit);
+ else if (offset > len)
+ return luaL_error(L,
+ "Start offset %d greater than input length %d.", (int) offset, (int) len);
+
+ if (decode_all) limit = INT_MAX;
+
+ mp_cur_init(&c,(const unsigned char *)s+offset,len-offset);
+
+ /* We loop over the decode because this could be a stream
+ * of multiple top-level values serialized together */
+ for(cnt = 0; c.left > 0 && cnt < limit; cnt++) {
+ mp_decode_to_lua_type(L,&c);
+
+ if (c.err == MP_CUR_ERROR_EOF) {
+ return luaL_error(L,"Missing bytes in input.");
+ } else if (c.err == MP_CUR_ERROR_BADFMT) {
+ return luaL_error(L,"Bad data format in input.");
+ }
+ }
+
+ if (!decode_all) {
+ /* c->left is the remaining size of the input buffer.
+ * subtract the remaining (unprocessed) size from the total
+ * input length to get our next start offset */
+ size_t new_offset = len - c.left;
+ if (new_offset > LONG_MAX) abort();
+
+ luaL_checkstack(L, 1, "in function mp_unpack_full");
+
+ /* Return offset -1 when we have processed the entire buffer. */
+ lua_pushinteger(L, c.left == 0 ? -1 : (lua_Integer) new_offset);
+ /* Results are returned with the arg elements still
+ * in place. Lua takes care of only returning
+ * elements above the args for us.
+ * In this case, we have one arg on the stack
+ * for this function, so we insert our first return
+ * value at position 2.
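+ * The practical effect is that the next-offset integer is the first value
+ * returned to the caller, followed by the decoded object(s).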
*/ + lua_insert(L, 2); + cnt += 1; /* increase return count by one to make room for offset */ + } + + return cnt; +} + +int mp_unpack(lua_State *L) { + return mp_unpack_full(L, 0, 0); +} + +int mp_unpack_one(lua_State *L) { + lua_Integer offset = luaL_optinteger(L, 2, 0); + /* Variable pop because offset may not exist */ + lua_pop(L, lua_gettop(L)-1); + return mp_unpack_full(L, 1, offset); +} + +int mp_unpack_limit(lua_State *L) { + lua_Integer limit = luaL_checkinteger(L, 2); + lua_Integer offset = luaL_optinteger(L, 3, 0); + /* Variable pop because offset may not exist */ + lua_pop(L, lua_gettop(L)-1); + + return mp_unpack_full(L, limit, offset); +} + +int mp_safe(lua_State *L) { + int argc, err, total_results; + + argc = lua_gettop(L); + + /* This adds our function to the bottom of the stack + * (the "call this function" position) */ + lua_pushvalue(L, lua_upvalueindex(1)); + lua_insert(L, 1); + + err = lua_pcall(L, argc, LUA_MULTRET, 0); + total_results = lua_gettop(L); + + if (!err) { + return total_results; + } else { + lua_pushnil(L); + lua_insert(L,-2); + return 2; + } +} + +/* -------------------------------------------------------------------------- */ +const struct luaL_Reg cmds[] = { + {"pack", mp_pack}, + {"unpack", mp_unpack}, + {"unpack_one", mp_unpack_one}, + {"unpack_limit", mp_unpack_limit}, + {0} +}; + +int luaopen_create(lua_State *L) { + int i; + /* Manually construct our module table instead of + * relying on _register or _newlib */ + lua_newtable(L); + + for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { + lua_pushcfunction(L, cmds[i].func); + lua_setfield(L, -2, cmds[i].name); + } + + /* Add metadata */ + lua_pushliteral(L, LUACMSGPACK_NAME); + lua_setfield(L, -2, "_NAME"); + lua_pushliteral(L, LUACMSGPACK_VERSION); + lua_setfield(L, -2, "_VERSION"); + lua_pushliteral(L, LUACMSGPACK_COPYRIGHT); + lua_setfield(L, -2, "_COPYRIGHT"); + lua_pushliteral(L, LUACMSGPACK_DESCRIPTION); + lua_setfield(L, -2, "_DESCRIPTION"); + return 1; +} + +LUALIB_API int luaopen_cmsgpack(lua_State *L) { + luaopen_create(L); + +#if LUA_VERSION_NUM < 502 + /* Register name globally for 5.1 */ + lua_pushvalue(L, -1); + lua_setglobal(L, LUACMSGPACK_NAME); +#endif + + return 1; +} + +LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) { + int i; + + luaopen_cmsgpack(L); + + /* Wrap all functions in the safe handler */ + for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { + lua_getfield(L, -1, cmds[i].name); + lua_pushcclosure(L, mp_safe, 1); + lua_setfield(L, -2, cmds[i].name); + } + +#if LUA_VERSION_NUM < 502 + /* Register name globally for 5.1 */ + lua_pushvalue(L, -1); + lua_setglobal(L, LUACMSGPACK_SAFE_NAME); +#endif + + return 1; +} + +/****************************************************************************** +* Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ diff --git a/deps/lua/src/lua_struct.c b/deps/lua/src/lua_struct.c new file mode 100644 index 0000000..c58c8e7 --- /dev/null +++ b/deps/lua/src/lua_struct.c @@ -0,0 +1,429 @@ +/* +** {====================================================== +** Library for packing/unpacking structures. +** $Id: struct.c,v 1.7 2018/05/11 22:04:31 roberto Exp $ +** See Copyright Notice at the end of this file +** ======================================================= +*/ +/* +** Valid formats: +** > - big endian +** < - little endian +** ![num] - alignment +** x - pading +** b/B - signed/unsigned byte +** h/H - signed/unsigned short +** l/L - signed/unsigned long +** T - size_t +** i/In - signed/unsigned integer with size 'n' (default is size of int) +** cn - sequence of 'n' chars (from/to a string); when packing, n==0 means + the whole string; when unpacking, n==0 means use the previous + read number as the string length +** s - zero-terminated string +** f - float +** d - double +** ' ' - ignored +*/ + + +#include +#include +#include +#include +#include + + +#include "lua.h" +#include "lauxlib.h" + + +#if (LUA_VERSION_NUM >= 502) + +#define luaL_register(L,n,f) luaL_newlib(L,f) + +#endif + + +/* basic integer type */ +#if !defined(STRUCT_INT) +#define STRUCT_INT long +#endif + +typedef STRUCT_INT Inttype; + +/* corresponding unsigned version */ +typedef unsigned STRUCT_INT Uinttype; + + +/* maximum size (in bytes) for integral types */ +#define MAXINTSIZE 32 + +/* is 'x' a power of 2? */ +#define isp2(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) + +/* dummy structure to get alignment requirements */ +struct cD { + char c; + double d; +}; + + +#define PADDING (sizeof(struct cD) - sizeof(double)) +#define MAXALIGN (PADDING > sizeof(int) ? PADDING : sizeof(int)) + + +/* endian options */ +#define BIG 0 +#define LITTLE 1 + + +static union { + int dummy; + char endian; +} const native = {1}; + + +typedef struct Header { + int endian; + int align; +} Header; + + +static int getnum (lua_State *L, const char **fmt, int df) { + if (!isdigit(**fmt)) /* no number? 
*/ + return df; /* return default value */ + else { + int a = 0; + do { + if (a > (INT_MAX / 10) || a * 10 > (INT_MAX - (**fmt - '0'))) + luaL_error(L, "integral size overflow"); + a = a*10 + *((*fmt)++) - '0'; + } while (isdigit(**fmt)); + return a; + } +} + + +#define defaultoptions(h) ((h)->endian = native.endian, (h)->align = 1) + + + +static size_t optsize (lua_State *L, char opt, const char **fmt) { + switch (opt) { + case 'B': case 'b': return sizeof(char); + case 'H': case 'h': return sizeof(short); + case 'L': case 'l': return sizeof(long); + case 'T': return sizeof(size_t); + case 'f': return sizeof(float); + case 'd': return sizeof(double); + case 'x': return 1; + case 'c': return getnum(L, fmt, 1); + case 'i': case 'I': { + int sz = getnum(L, fmt, sizeof(int)); + if (sz > MAXINTSIZE) + luaL_error(L, "integral size %d is larger than limit of %d", + sz, MAXINTSIZE); + return sz; + } + default: return 0; /* other cases do not need alignment */ + } +} + + +/* +** return number of bytes needed to align an element of size 'size' +** at current position 'len' +*/ +static int gettoalign (size_t len, Header *h, int opt, size_t size) { + if (size == 0 || opt == 'c') return 0; + if (size > (size_t)h->align) + size = h->align; /* respect max. alignment */ + return (size - (len & (size - 1))) & (size - 1); +} + + +/* +** options to control endianess and alignment +*/ +static void controloptions (lua_State *L, int opt, const char **fmt, + Header *h) { + switch (opt) { + case ' ': return; /* ignore white spaces */ + case '>': h->endian = BIG; return; + case '<': h->endian = LITTLE; return; + case '!': { + int a = getnum(L, fmt, MAXALIGN); + if (!isp2(a)) + luaL_error(L, "alignment %d is not a power of 2", a); + h->align = a; + return; + } + default: { + const char *msg = lua_pushfstring(L, "invalid format option '%c'", opt); + luaL_argerror(L, 1, msg); + } + } +} + + +static void putinteger (lua_State *L, luaL_Buffer *b, int arg, int endian, + int size) { + lua_Number n = luaL_checknumber(L, arg); + Uinttype value; + char buff[MAXINTSIZE]; + if (n < 0) + value = (Uinttype)(Inttype)n; + else + value = (Uinttype)n; + if (endian == LITTLE) { + int i; + for (i = 0; i < size; i++) { + buff[i] = (value & 0xff); + value >>= 8; + } + } + else { + int i; + for (i = size - 1; i >= 0; i--) { + buff[i] = (value & 0xff); + value >>= 8; + } + } + luaL_addlstring(b, buff, size); +} + + +static void correctbytes (char *b, int size, int endian) { + if (endian != native.endian) { + int i = 0; + while (i < --size) { + char temp = b[i]; + b[i++] = b[size]; + b[size] = temp; + } + } +} + + +static int b_pack (lua_State *L) { + luaL_Buffer b; + const char *fmt = luaL_checkstring(L, 1); + Header h; + int arg = 2; + size_t totalsize = 0; + defaultoptions(&h); + lua_pushnil(L); /* mark to separate arguments from string buffer */ + luaL_buffinit(L, &b); + while (*fmt != '\0') { + int opt = *fmt++; + size_t size = optsize(L, opt, &fmt); + int toalign = gettoalign(totalsize, &h, opt, size); + totalsize += toalign; + while (toalign-- > 0) luaL_addchar(&b, '\0'); + switch (opt) { + case 'b': case 'B': case 'h': case 'H': + case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ + putinteger(L, &b, arg++, h.endian, size); + break; + } + case 'x': { + luaL_addchar(&b, '\0'); + break; + } + case 'f': { + float f = (float)luaL_checknumber(L, arg++); + correctbytes((char *)&f, size, h.endian); + luaL_addlstring(&b, (char *)&f, size); + break; + } + case 'd': { + double d = luaL_checknumber(L, arg++); + 
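/* byte-swap in place when the requested endianness differs from the host's */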
correctbytes((char *)&d, size, h.endian); + luaL_addlstring(&b, (char *)&d, size); + break; + } + case 'c': case 's': { + size_t l; + const char *s = luaL_checklstring(L, arg++, &l); + if (size == 0) size = l; + luaL_argcheck(L, l >= (size_t)size, arg, "string too short"); + luaL_addlstring(&b, s, size); + if (opt == 's') { + luaL_addchar(&b, '\0'); /* add zero at the end */ + size++; + } + break; + } + default: controloptions(L, opt, &fmt, &h); + } + totalsize += size; + } + luaL_pushresult(&b); + return 1; +} + + +static lua_Number getinteger (const char *buff, int endian, + int issigned, int size) { + Uinttype l = 0; + int i; + if (endian == BIG) { + for (i = 0; i < size; i++) { + l <<= 8; + l |= (Uinttype)(unsigned char)buff[i]; + } + } + else { + for (i = size - 1; i >= 0; i--) { + l <<= 8; + l |= (Uinttype)(unsigned char)buff[i]; + } + } + if (!issigned) + return (lua_Number)l; + else { /* signed format */ + Uinttype mask = (Uinttype)(~((Uinttype)0)) << (size*8 - 1); + if (l & mask) /* negative value? */ + l |= mask; /* signal extension */ + return (lua_Number)(Inttype)l; + } +} + + +static int b_unpack (lua_State *L) { + Header h; + const char *fmt = luaL_checkstring(L, 1); + size_t ld; + const char *data = luaL_checklstring(L, 2, &ld); + size_t pos = luaL_optinteger(L, 3, 1); + luaL_argcheck(L, pos > 0, 3, "offset must be 1 or greater"); + pos--; /* Lua indexes are 1-based, but here we want 0-based for C + * pointer math. */ + int n = 0; /* number of results */ + defaultoptions(&h); + while (*fmt) { + int opt = *fmt++; + size_t size = optsize(L, opt, &fmt); + pos += gettoalign(pos, &h, opt, size); + luaL_argcheck(L, size <= ld && pos <= ld - size, + 2, "data string too short"); + /* stack space for item + next position */ + luaL_checkstack(L, 2, "too many results"); + switch (opt) { + case 'b': case 'B': case 'h': case 'H': + case 'l': case 'L': case 'T': case 'i': case 'I': { /* integer types */ + int issigned = islower(opt); + lua_Number res = getinteger(data+pos, h.endian, issigned, size); + lua_pushnumber(L, res); n++; + break; + } + case 'x': { + break; + } + case 'f': { + float f; + memcpy(&f, data+pos, size); + correctbytes((char *)&f, sizeof(f), h.endian); + lua_pushnumber(L, f); n++; + break; + } + case 'd': { + double d; + memcpy(&d, data+pos, size); + correctbytes((char *)&d, sizeof(d), h.endian); + lua_pushnumber(L, d); n++; + break; + } + case 'c': { + if (size == 0) { + if (n == 0 || !lua_isnumber(L, -1)) + luaL_error(L, "format 'c0' needs a previous size"); + size = lua_tonumber(L, -1); + lua_pop(L, 1); n--; + luaL_argcheck(L, size <= ld && pos <= ld - size, + 2, "data string too short"); + } + lua_pushlstring(L, data+pos, size); n++; + break; + } + case 's': { + const char *e = (const char *)memchr(data+pos, '\0', ld - pos); + if (e == NULL) + luaL_error(L, "unfinished string in data"); + size = (e - (data+pos)) + 1; + lua_pushlstring(L, data+pos, size - 1); n++; + break; + } + default: controloptions(L, opt, &fmt, &h); + } + pos += size; + } + lua_pushinteger(L, pos + 1); /* next position */ + return n + 1; +} + + +static int b_size (lua_State *L) { + Header h; + const char *fmt = luaL_checkstring(L, 1); + size_t pos = 0; + defaultoptions(&h); + while (*fmt) { + int opt = *fmt++; + size_t size = optsize(L, opt, &fmt); + pos += gettoalign(pos, &h, opt, size); + if (opt == 's') + luaL_argerror(L, 1, "option 's' has no fixed size"); + else if (opt == 'c' && size == 0) + luaL_argerror(L, 1, "option 'c0' has no fixed size"); + if (!isalnum(opt)) + controloptions(L, opt, 
&fmt, &h); + pos += size; + } + lua_pushinteger(L, pos); + return 1; +} + +/* }====================================================== */ + + + +static const struct luaL_Reg thislib[] = { + {"pack", b_pack}, + {"unpack", b_unpack}, + {"size", b_size}, + {NULL, NULL} +}; + + +LUALIB_API int luaopen_struct (lua_State *L); + +LUALIB_API int luaopen_struct (lua_State *L) { + luaL_register(L, "struct", thislib); + return 1; +} + + +/****************************************************************************** +* Copyright (C) 2010-2018 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + diff --git a/deps/lua/src/luac.c b/deps/lua/src/luac.c new file mode 100644 index 0000000..d070173 --- /dev/null +++ b/deps/lua/src/luac.c @@ -0,0 +1,200 @@ +/* +** $Id: luac.c,v 1.54 2006/06/02 17:37:11 lhf Exp $ +** Lua compiler (saves bytecodes to files; also list bytecodes) +** See Copyright Notice in lua.h +*/ + +#include +#include +#include +#include + +#define luac_c +#define LUA_CORE + +#include "lua.h" +#include "lauxlib.h" + +#include "ldo.h" +#include "lfunc.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstring.h" +#include "lundump.h" + +#define PROGNAME "luac" /* default program name */ +#define OUTPUT PROGNAME ".out" /* default output file */ + +static int listing=0; /* list bytecodes? */ +static int dumping=1; /* dump bytecodes? */ +static int stripping=0; /* strip debug information? 
*/ +static char Output[]={ OUTPUT }; /* default output file name */ +static const char* output=Output; /* actual output file name */ +static const char* progname=PROGNAME; /* actual program name */ + +static void fatal(const char* message) +{ + fprintf(stderr,"%s: %s\n",progname,message); + exit(EXIT_FAILURE); +} + +static void cannot(const char* what) +{ + fprintf(stderr,"%s: cannot %s %s: %s\n",progname,what,output,strerror(errno)); + exit(EXIT_FAILURE); +} + +static void usage(const char* message) +{ + if (*message=='-') + fprintf(stderr,"%s: unrecognized option " LUA_QS "\n",progname,message); + else + fprintf(stderr,"%s: %s\n",progname,message); + fprintf(stderr, + "usage: %s [options] [filenames].\n" + "Available options are:\n" + " - process stdin\n" + " -l list\n" + " -o name output to file " LUA_QL("name") " (default is \"%s\")\n" + " -p parse only\n" + " -s strip debug information\n" + " -v show version information\n" + " -- stop handling options\n", + progname,Output); + exit(EXIT_FAILURE); +} + +#define IS(s) (strcmp(argv[i],s)==0) + +static int doargs(int argc, char* argv[]) +{ + int i; + int version=0; + if (argv[0]!=NULL && *argv[0]!=0) progname=argv[0]; + for (i=1; itop+(i))->l.p) + +static const Proto* combine(lua_State* L, int n) +{ + if (n==1) + return toproto(L,-1); + else + { + int i,pc; + Proto* f=luaF_newproto(L); + setptvalue2s(L,L->top,f); incr_top(L); + f->source=luaS_newliteral(L,"=(" PROGNAME ")"); + f->maxstacksize=1; + pc=2*n+1; + f->code=luaM_newvector(L,pc,Instruction); + f->sizecode=pc; + f->p=luaM_newvector(L,n,Proto*); + f->sizep=n; + pc=0; + for (i=0; ip[i]=toproto(L,i-n-1); + f->code[pc++]=CREATE_ABx(OP_CLOSURE,0,i); + f->code[pc++]=CREATE_ABC(OP_CALL,0,1,1); + } + f->code[pc++]=CREATE_ABC(OP_RETURN,0,1,0); + return f; + } +} + +static int writer(lua_State* L, const void* p, size_t size, void* u) +{ + UNUSED(L); + return (fwrite(p,size,1,(FILE*)u)!=1) && (size!=0); +} + +struct Smain { + int argc; + char** argv; +}; + +static int pmain(lua_State* L) +{ + struct Smain* s = (struct Smain*)lua_touserdata(L, 1); + int argc=s->argc; + char** argv=s->argv; + const Proto* f; + int i; + if (!lua_checkstack(L,argc)) fatal("too many input files"); + for (i=0; i1); + if (dumping) + { + FILE* D= (output==NULL) ? stdout : fopen(output,"wb"); + if (D==NULL) cannot("open"); + lua_lock(L); + luaU_dump(L,f,writer,D,stripping); + lua_unlock(L); + if (ferror(D)) cannot("write"); + if (fclose(D)) cannot("close"); + } + return 0; +} + +int main(int argc, char* argv[]) +{ + lua_State* L; + struct Smain s; + int i=doargs(argc,argv); + argc-=i; argv+=i; + if (argc<=0) usage("no input files given"); + L=lua_open(); + if (L==NULL) fatal("not enough memory for state"); + s.argc=argc; + s.argv=argv; + if (lua_cpcall(L,pmain,&s)!=0) fatal(lua_tostring(L,-1)); + lua_close(L); + return EXIT_SUCCESS; +} diff --git a/deps/lua/src/luaconf.h b/deps/lua/src/luaconf.h new file mode 100644 index 0000000..e2cb261 --- /dev/null +++ b/deps/lua/src/luaconf.h @@ -0,0 +1,763 @@ +/* +** $Id: luaconf.h,v 1.82.1.7 2008/02/11 16:25:08 roberto Exp $ +** Configuration file for Lua +** See Copyright Notice in lua.h +*/ + + +#ifndef lconfig_h +#define lconfig_h + +#include +#include + + +/* +** ================================================================== +** Search for "@@" to find all configurable definitions. +** =================================================================== +*/ + + +/* +@@ LUA_ANSI controls the use of non-ansi features. 
+** CHANGE it (define it) if you want Lua to avoid the use of any +** non-ansi feature or library. +*/ +#if defined(__STRICT_ANSI__) +#define LUA_ANSI +#endif + + +#if !defined(LUA_ANSI) && defined(_WIN32) +#define LUA_WIN +#endif + +#if defined(LUA_USE_LINUX) +#define LUA_USE_POSIX +#define LUA_USE_DLOPEN /* needs an extra library: -ldl */ +#define LUA_USE_READLINE /* needs some extra libraries */ +#endif + +#if defined(LUA_USE_MACOSX) +#define LUA_USE_POSIX +#define LUA_DL_DYLD /* does not need extra library */ +#endif + + + +/* +@@ LUA_USE_POSIX includes all functionallity listed as X/Open System +@* Interfaces Extension (XSI). +** CHANGE it (define it) if your system is XSI compatible. +*/ +#if defined(LUA_USE_POSIX) +#define LUA_USE_MKSTEMP +#define LUA_USE_ISATTY +#define LUA_USE_POPEN +#define LUA_USE_ULONGJMP +#endif + + +/* +@@ LUA_PATH and LUA_CPATH are the names of the environment variables that +@* Lua check to set its paths. +@@ LUA_INIT is the name of the environment variable that Lua +@* checks for initialization code. +** CHANGE them if you want different names. +*/ +#define LUA_PATH "LUA_PATH" +#define LUA_CPATH "LUA_CPATH" +#define LUA_INIT "LUA_INIT" + + +/* +@@ LUA_PATH_DEFAULT is the default path that Lua uses to look for +@* Lua libraries. +@@ LUA_CPATH_DEFAULT is the default path that Lua uses to look for +@* C libraries. +** CHANGE them if your machine has a non-conventional directory +** hierarchy or if you want to install your libraries in +** non-conventional directories. +*/ +#if defined(_WIN32) +/* +** In Windows, any exclamation mark ('!') in the path is replaced by the +** path of the directory of the executable file of the current process. +*/ +#define LUA_LDIR "!\\lua\\" +#define LUA_CDIR "!\\" +#define LUA_PATH_DEFAULT \ + ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" \ + LUA_CDIR"?.lua;" LUA_CDIR"?\\init.lua" +#define LUA_CPATH_DEFAULT \ + ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" + +#else +#define LUA_ROOT "/usr/local/" +#define LUA_LDIR LUA_ROOT "share/lua/5.1/" +#define LUA_CDIR LUA_ROOT "lib/lua/5.1/" +#define LUA_PATH_DEFAULT \ + "./?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua;" \ + LUA_CDIR"?.lua;" LUA_CDIR"?/init.lua" +#define LUA_CPATH_DEFAULT \ + "./?.so;" LUA_CDIR"?.so;" LUA_CDIR"loadall.so" +#endif + + +/* +@@ LUA_DIRSEP is the directory separator (for submodules). +** CHANGE it if your machine does not use "/" as the directory separator +** and is not Windows. (On Windows Lua automatically uses "\".) +*/ +#if defined(_WIN32) +#define LUA_DIRSEP "\\" +#else +#define LUA_DIRSEP "/" +#endif + + +/* +@@ LUA_PATHSEP is the character that separates templates in a path. +@@ LUA_PATH_MARK is the string that marks the substitution points in a +@* template. +@@ LUA_EXECDIR in a Windows path is replaced by the executable's +@* directory. +@@ LUA_IGMARK is a mark to ignore all before it when bulding the +@* luaopen_ function name. +** CHANGE them if for some reason your system cannot use those +** characters. (E.g., if one of those characters is a common character +** in file/directory names.) Probably you do not need to change them. +*/ +#define LUA_PATHSEP ";" +#define LUA_PATH_MARK "?" +#define LUA_EXECDIR "!" +#define LUA_IGMARK "-" + + +/* +@@ LUA_INTEGER is the integral type used by lua_pushinteger/lua_tointeger. +** CHANGE that if ptrdiff_t is not adequate on your machine. (On most +** machines, ptrdiff_t gives a good choice between int or long.) 
+*/ +#define LUA_INTEGER ptrdiff_t + + +/* +@@ LUA_API is a mark for all core API functions. +@@ LUALIB_API is a mark for all standard library functions. +** CHANGE them if you need to define those functions in some special way. +** For instance, if you want to create one Windows DLL with the core and +** the libraries, you may want to use the following definition (define +** LUA_BUILD_AS_DLL to get it). +*/ +#if defined(LUA_BUILD_AS_DLL) + +#if defined(LUA_CORE) || defined(LUA_LIB) +#define LUA_API __declspec(dllexport) +#else +#define LUA_API __declspec(dllimport) +#endif + +#else + +#define LUA_API extern + +#endif + +/* more often than not the libs go together with the core */ +#define LUALIB_API LUA_API + + +/* +@@ LUAI_FUNC is a mark for all extern functions that are not to be +@* exported to outside modules. +@@ LUAI_DATA is a mark for all extern (const) variables that are not to +@* be exported to outside modules. +** CHANGE them if you need to mark them in some special way. Elf/gcc +** (versions 3.2 and later) mark them as "hidden" to optimize access +** when Lua is compiled as a shared library. +*/ +#if defined(luaall_c) +#define LUAI_FUNC static +#define LUAI_DATA /* empty */ + +#elif defined(__GNUC__) && ((__GNUC__*100 + __GNUC_MINOR__) >= 302) && \ + defined(__ELF__) +#define LUAI_FUNC __attribute__((visibility("hidden"))) extern +#define LUAI_DATA LUAI_FUNC + +#else +#define LUAI_FUNC extern +#define LUAI_DATA extern +#endif + + + +/* +@@ LUA_QL describes how error messages quote program elements. +** CHANGE it if you want a different appearance. +*/ +#define LUA_QL(x) "'" x "'" +#define LUA_QS LUA_QL("%s") + + +/* +@@ LUA_IDSIZE gives the maximum size for the description of the source +@* of a function in debug information. +** CHANGE it if you want a different size. +*/ +#define LUA_IDSIZE 60 + + +/* +** {================================================================== +** Stand-alone configuration +** =================================================================== +*/ + +#if defined(lua_c) || defined(luaall_c) + +/* +@@ lua_stdin_is_tty detects whether the standard input is a 'tty' (that +@* is, whether we're running lua interactively). +** CHANGE it if you have a better definition for non-POSIX/non-Windows +** systems. +*/ +#if defined(LUA_USE_ISATTY) +#include +#define lua_stdin_is_tty() isatty(0) +#elif defined(LUA_WIN) +#include +#include +#define lua_stdin_is_tty() _isatty(_fileno(stdin)) +#else +#define lua_stdin_is_tty() 1 /* assume stdin is a tty */ +#endif + + +/* +@@ LUA_PROMPT is the default prompt used by stand-alone Lua. +@@ LUA_PROMPT2 is the default continuation prompt used by stand-alone Lua. +** CHANGE them if you want different prompts. (You can also change the +** prompts dynamically, assigning to globals _PROMPT/_PROMPT2.) +*/ +#define LUA_PROMPT "> " +#define LUA_PROMPT2 ">> " + + +/* +@@ LUA_PROGNAME is the default name for the stand-alone Lua program. +** CHANGE it if your stand-alone interpreter has a different name and +** your system is not able to detect that name automatically. +*/ +#define LUA_PROGNAME "lua" + + +/* +@@ LUA_MAXINPUT is the maximum length for an input line in the +@* stand-alone interpreter. +** CHANGE it if you need longer lines. +*/ +#define LUA_MAXINPUT 512 + + +/* +@@ lua_readline defines how to show a prompt and then read a line from +@* the standard input. +@@ lua_saveline defines how to "save" a read line in a "history". +@@ lua_freeline defines how to free a line read by lua_readline. 
+** CHANGE them if you want to improve this functionality (e.g., by using +** GNU readline and history facilities). +*/ +#if defined(LUA_USE_READLINE) +#include +#include +#include +#define lua_readline(L,b,p) ((void)L, ((b)=readline(p)) != NULL) +#define lua_saveline(L,idx) \ + if (lua_strlen(L,idx) > 0) /* non-empty line? */ \ + add_history(lua_tostring(L, idx)); /* add it to history */ +#define lua_freeline(L,b) ((void)L, free(b)) +#else +#define lua_readline(L,b,p) \ + ((void)L, fputs(p, stdout), fflush(stdout), /* show prompt */ \ + fgets(b, LUA_MAXINPUT, stdin) != NULL) /* get line */ +#define lua_saveline(L,idx) { (void)L; (void)idx; } +#define lua_freeline(L,b) { (void)L; (void)b; } +#endif + +#endif + +/* }================================================================== */ + + +/* +@@ LUAI_GCPAUSE defines the default pause between garbage-collector cycles +@* as a percentage. +** CHANGE it if you want the GC to run faster or slower (higher values +** mean larger pauses which mean slower collection.) You can also change +** this value dynamically. +*/ +#define LUAI_GCPAUSE 200 /* 200% (wait memory to double before next GC) */ + + +/* +@@ LUAI_GCMUL defines the default speed of garbage collection relative to +@* memory allocation as a percentage. +** CHANGE it if you want to change the granularity of the garbage +** collection. (Higher values mean coarser collections. 0 represents +** infinity, where each step performs a full collection.) You can also +** change this value dynamically. +*/ +#define LUAI_GCMUL 200 /* GC runs 'twice the speed' of memory allocation */ + + + +/* +@@ LUA_COMPAT_GETN controls compatibility with old getn behavior. +** CHANGE it (define it) if you want exact compatibility with the +** behavior of setn/getn in Lua 5.0. +*/ +#undef LUA_COMPAT_GETN + +/* +@@ LUA_COMPAT_LOADLIB controls compatibility about global loadlib. +** CHANGE it to undefined as soon as you do not need a global 'loadlib' +** function (the function is still available as 'package.loadlib'). +*/ +#undef LUA_COMPAT_LOADLIB + +/* +@@ LUA_COMPAT_VARARG controls compatibility with old vararg feature. +** CHANGE it to undefined as soon as your programs use only '...' to +** access vararg parameters (instead of the old 'arg' table). +*/ +#define LUA_COMPAT_VARARG + +/* +@@ LUA_COMPAT_MOD controls compatibility with old math.mod function. +** CHANGE it to undefined as soon as your programs use 'math.fmod' or +** the new '%' operator instead of 'math.mod'. +*/ +#define LUA_COMPAT_MOD + +/* +@@ LUA_COMPAT_LSTR controls compatibility with old long string nesting +@* facility. +** CHANGE it to 2 if you want the old behaviour, or undefine it to turn +** off the advisory error when nesting [[...]]. +*/ +#define LUA_COMPAT_LSTR 1 + +/* +@@ LUA_COMPAT_GFIND controls compatibility with old 'string.gfind' name. +** CHANGE it to undefined as soon as you rename 'string.gfind' to +** 'string.gmatch'. +*/ +#define LUA_COMPAT_GFIND + +/* +@@ LUA_COMPAT_OPENLIB controls compatibility with old 'luaL_openlib' +@* behavior. +** CHANGE it to undefined as soon as you replace to 'luaL_register' +** your uses of 'luaL_openlib' +*/ +#define LUA_COMPAT_OPENLIB + + + +/* +@@ luai_apicheck is the assert macro used by the Lua-C API. +** CHANGE luai_apicheck if you want Lua to perform some checks in the +** parameters it gets from API calls. This may slow down the interpreter +** a bit, but may be quite useful when debugging C code that interfaces +** with Lua. A useful redefinition is to use assert.h. 
+*/ +#if defined(LUA_USE_APICHECK) +#include +#define luai_apicheck(L,o) { (void)L; assert(o); } +#else +#define luai_apicheck(L,o) { (void)L; } +#endif + + +/* +@@ LUAI_BITSINT defines the number of bits in an int. +** CHANGE here if Lua cannot automatically detect the number of bits of +** your machine. Probably you do not need to change this. +*/ +/* avoid overflows in comparison */ +#if INT_MAX-20 < 32760 +#define LUAI_BITSINT 16 +#elif INT_MAX > 2147483640L +/* int has at least 32 bits */ +#define LUAI_BITSINT 32 +#else +#error "you must define LUA_BITSINT with number of bits in an integer" +#endif + + +/* +@@ LUAI_UINT32 is an unsigned integer with at least 32 bits. +@@ LUAI_INT32 is an signed integer with at least 32 bits. +@@ LUAI_UMEM is an unsigned integer big enough to count the total +@* memory used by Lua. +@@ LUAI_MEM is a signed integer big enough to count the total memory +@* used by Lua. +** CHANGE here if for some weird reason the default definitions are not +** good enough for your machine. (The definitions in the 'else' +** part always works, but may waste space on machines with 64-bit +** longs.) Probably you do not need to change this. +*/ +#if LUAI_BITSINT >= 32 +#define LUAI_UINT32 unsigned int +#define LUAI_INT32 int +#define LUAI_MAXINT32 INT_MAX +#define LUAI_UMEM size_t +#define LUAI_MEM ptrdiff_t +#else +/* 16-bit ints */ +#define LUAI_UINT32 unsigned long +#define LUAI_INT32 long +#define LUAI_MAXINT32 LONG_MAX +#define LUAI_UMEM unsigned long +#define LUAI_MEM long +#endif + + +/* +@@ LUAI_MAXCALLS limits the number of nested calls. +** CHANGE it if you need really deep recursive calls. This limit is +** arbitrary; its only purpose is to stop infinite recursion before +** exhausting memory. +*/ +#define LUAI_MAXCALLS 20000 + + +/* +@@ LUAI_MAXCSTACK limits the number of Lua stack slots that a C function +@* can use. +** CHANGE it if you need lots of (Lua) stack space for your C +** functions. This limit is arbitrary; its only purpose is to stop C +** functions to consume unlimited stack space. (must be smaller than +** -LUA_REGISTRYINDEX) +*/ +#define LUAI_MAXCSTACK 8000 + + + +/* +** {================================================================== +** CHANGE (to smaller values) the following definitions if your system +** has a small C stack. (Or you may want to change them to larger +** values if your system has a large C stack and these limits are +** too rigid for you.) Some of these constants control the size of +** stack-allocated arrays used by the compiler or the interpreter, while +** others limit the maximum number of recursive calls that the compiler +** or the interpreter can perform. Values too large may cause a C stack +** overflow for some forms of deep constructs. +** =================================================================== +*/ + + +/* +@@ LUAI_MAXCCALLS is the maximum depth for nested C calls (short) and +@* syntactical nested non-terminals in a program. +*/ +#define LUAI_MAXCCALLS 200 + + +/* +@@ LUAI_MAXVARS is the maximum number of local variables per function +@* (must be smaller than 250). +*/ +#define LUAI_MAXVARS 200 + + +/* +@@ LUAI_MAXUPVALUES is the maximum number of upvalues per function +@* (must be smaller than 250). +*/ +#define LUAI_MAXUPVALUES 60 + + +/* +@@ LUAL_BUFFERSIZE is the buffer size used by the lauxlib buffer system. 
+*/ +#define LUAL_BUFFERSIZE BUFSIZ + +/* }================================================================== */ + + + + +/* +** {================================================================== +@@ LUA_NUMBER is the type of numbers in Lua. +** CHANGE the following definitions only if you want to build Lua +** with a number type different from double. You may also need to +** change lua_number2int & lua_number2integer. +** =================================================================== +*/ + +#define LUA_NUMBER_DOUBLE +#define LUA_NUMBER double + +/* +@@ LUAI_UACNUMBER is the result of an 'usual argument conversion' +@* over a number. +*/ +#define LUAI_UACNUMBER double + + +/* +@@ LUA_NUMBER_SCAN is the format for reading numbers. +@@ LUA_NUMBER_FMT is the format for writing numbers. +@@ lua_number2str converts a number to a string. +@@ LUAI_MAXNUMBER2STR is maximum size of previous conversion. +@@ lua_str2number converts a string to a number. +*/ +#define LUA_NUMBER_SCAN "%lf" +#define LUA_NUMBER_FMT "%.14g" +#define lua_number2str(s,n) sprintf((s), LUA_NUMBER_FMT, (n)) +#define LUAI_MAXNUMBER2STR 32 /* 16 digits, sign, point, and \0 */ +#define lua_str2number(s,p) strtod((s), (p)) + + +/* +@@ The luai_num* macros define the primitive operations over numbers. +*/ +#if defined(LUA_CORE) +#include +#define luai_numadd(a,b) ((a)+(b)) +#define luai_numsub(a,b) ((a)-(b)) +#define luai_nummul(a,b) ((a)*(b)) +#define luai_numdiv(a,b) ((a)/(b)) +#define luai_nummod(a,b) ((a) - floor((a)/(b))*(b)) +#define luai_numpow(a,b) (pow(a,b)) +#define luai_numunm(a) (-(a)) +#define luai_numeq(a,b) ((a)==(b)) +#define luai_numlt(a,b) ((a)<(b)) +#define luai_numle(a,b) ((a)<=(b)) +#define luai_numisnan(a) (!luai_numeq((a), (a))) +#endif + + +/* +@@ lua_number2int is a macro to convert lua_Number to int. +@@ lua_number2integer is a macro to convert lua_Number to lua_Integer. +** CHANGE them if you know a faster way to convert a lua_Number to +** int (with any rounding method and without throwing errors) in your +** system. In Pentium machines, a naive typecast from double to int +** in C is extremely slow, so any alternative is worth trying. +*/ + +/* On a Pentium, resort to a trick */ +#if defined(LUA_NUMBER_DOUBLE) && !defined(LUA_ANSI) && !defined(__SSE2__) && \ + (defined(__i386) || defined (_M_IX86) || defined(__i386__)) + +/* On a Microsoft compiler, use assembler */ +#if defined(_MSC_VER) + +#define lua_number2int(i,d) __asm fld d __asm fistp i +#define lua_number2integer(i,n) lua_number2int(i, n) + +/* the next trick should work on any Pentium, but sometimes clashes + with a DirectX idiosyncrasy */ +#else + +union luai_Cast { double l_d; long l_l; }; +#define lua_number2int(i,d) \ + { volatile union luai_Cast u; u.l_d = (d) + 6755399441055744.0; (i) = u.l_l; } +#define lua_number2integer(i,n) lua_number2int(i, n) + +#endif + + +/* this option always works, but may be slow */ +#else +#define lua_number2int(i,d) ((i)=(int)(d)) +#define lua_number2integer(i,d) ((i)=(lua_Integer)(d)) + +#endif + +/* }================================================================== */ + + +/* +@@ LUAI_USER_ALIGNMENT_T is a type that requires maximum alignment. +** CHANGE it if your system requires alignments larger than double. (For +** instance, if your system supports long doubles and they must be +** aligned in 16-byte boundaries, then you should add long double in the +** union.) Probably you do not need to change this. 
+*/ +#define LUAI_USER_ALIGNMENT_T union { double u; void *s; long l; } + + +/* +@@ LUAI_THROW/LUAI_TRY define how Lua does exception handling. +** CHANGE them if you prefer to use longjmp/setjmp even with C++ +** or if want/don't to use _longjmp/_setjmp instead of regular +** longjmp/setjmp. By default, Lua handles errors with exceptions when +** compiling as C++ code, with _longjmp/_setjmp when asked to use them, +** and with longjmp/setjmp otherwise. +*/ +#if defined(__cplusplus) +/* C++ exceptions */ +#define LUAI_THROW(L,c) throw(c) +#define LUAI_TRY(L,c,a) try { a } catch(...) \ + { if ((c)->status == 0) (c)->status = -1; } +#define luai_jmpbuf int /* dummy variable */ + +#elif defined(LUA_USE_ULONGJMP) +/* in Unix, try _longjmp/_setjmp (more efficient) */ +#define LUAI_THROW(L,c) _longjmp((c)->b, 1) +#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } +#define luai_jmpbuf jmp_buf + +#else +/* default handling with long jumps */ +#define LUAI_THROW(L,c) longjmp((c)->b, 1) +#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } +#define luai_jmpbuf jmp_buf + +#endif + + +/* +@@ LUA_MAXCAPTURES is the maximum number of captures that a pattern +@* can do during pattern-matching. +** CHANGE it if you need more captures. This limit is arbitrary. +*/ +#define LUA_MAXCAPTURES 32 + + +/* +@@ lua_tmpnam is the function that the OS library uses to create a +@* temporary name. +@@ LUA_TMPNAMBUFSIZE is the maximum size of a name created by lua_tmpnam. +** CHANGE them if you have an alternative to tmpnam (which is considered +** insecure) or if you want the original tmpnam anyway. By default, Lua +** uses tmpnam except when POSIX is available, where it uses mkstemp. +*/ +#if defined(loslib_c) || defined(luaall_c) + +#if defined(LUA_USE_MKSTEMP) +#include +#define LUA_TMPNAMBUFSIZE 32 +#define lua_tmpnam(b,e) { \ + strcpy(b, "/tmp/lua_XXXXXX"); \ + e = mkstemp(b); \ + if (e != -1) close(e); \ + e = (e == -1); } + +#else +#define LUA_TMPNAMBUFSIZE L_tmpnam +#define lua_tmpnam(b,e) { e = (tmpnam(b) == NULL); } +#endif + +#endif + + +/* +@@ lua_popen spawns a new process connected to the current one through +@* the file streams. +** CHANGE it if you have a way to implement it in your system. +*/ +#if defined(LUA_USE_POPEN) + +#define lua_popen(L,c,m) ((void)L, fflush(NULL), popen(c,m)) +#define lua_pclose(L,file) ((void)L, (pclose(file) != -1)) + +#elif defined(LUA_WIN) + +#define lua_popen(L,c,m) ((void)L, _popen(c,m)) +#define lua_pclose(L,file) ((void)L, (_pclose(file) != -1)) + +#else + +#define lua_popen(L,c,m) ((void)((void)c, m), \ + luaL_error(L, LUA_QL("popen") " not supported"), (FILE*)0) +#define lua_pclose(L,file) ((void)((void)L, file), 0) + +#endif + +/* +@@ LUA_DL_* define which dynamic-library system Lua should use. +** CHANGE here if Lua has problems choosing the appropriate +** dynamic-library system for your platform (either Windows' DLL, Mac's +** dyld, or Unix's dlopen). If your system is some kind of Unix, there +** is a good chance that it has dlopen, so LUA_DL_DLOPEN will work for +** it. To use dlopen you also need to adapt the src/Makefile (probably +** adding -ldl to the linker options), so Lua does not select it +** automatically. (When you change the makefile to add -ldl, you must +** also add -DLUA_USE_DLOPEN.) +** If you do not want any kind of dynamic library, undefine all these +** options. +** By default, _WIN32 gets LUA_DL_DLL and MAC OS X gets LUA_DL_DYLD. 
+*/ +#if defined(LUA_USE_DLOPEN) +#define LUA_DL_DLOPEN +#endif + +#if defined(LUA_WIN) +#define LUA_DL_DLL +#endif + + +/* +@@ LUAI_EXTRASPACE allows you to add user-specific data in a lua_State +@* (the data goes just *before* the lua_State pointer). +** CHANGE (define) this if you really need that. This value must be +** a multiple of the maximum alignment required for your machine. +*/ +#define LUAI_EXTRASPACE 0 + + +/* +@@ luai_userstate* allow user-specific actions on threads. +** CHANGE them if you defined LUAI_EXTRASPACE and need to do something +** extra when a thread is created/deleted/resumed/yielded. +*/ +#define luai_userstateopen(L) ((void)L) +#define luai_userstateclose(L) ((void)L) +#define luai_userstatethread(L,L1) ((void)L) +#define luai_userstatefree(L) ((void)L) +#define luai_userstateresume(L,n) ((void)L) +#define luai_userstateyield(L,n) ((void)L) + + +/* +@@ LUA_INTFRMLEN is the length modifier for integer conversions +@* in 'string.format'. +@@ LUA_INTFRM_T is the integer type correspoding to the previous length +@* modifier. +** CHANGE them if your system supports long long or does not support long. +*/ + +#if defined(LUA_USELONGLONG) + +#define LUA_INTFRMLEN "ll" +#define LUA_INTFRM_T long long + +#else + +#define LUA_INTFRMLEN "l" +#define LUA_INTFRM_T long + +#endif + + + +/* =================================================================== */ + +/* +** Local configuration. You can use this space to add your redefinitions +** without modifying the main part of the file. +*/ + + + +#endif + diff --git a/deps/lua/src/lualib.h b/deps/lua/src/lualib.h new file mode 100644 index 0000000..469417f --- /dev/null +++ b/deps/lua/src/lualib.h @@ -0,0 +1,53 @@ +/* +** $Id: lualib.h,v 1.36.1.1 2007/12/27 13:02:25 roberto Exp $ +** Lua standard libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lualib_h +#define lualib_h + +#include "lua.h" + + +/* Key to file-handle type */ +#define LUA_FILEHANDLE "FILE*" + + +#define LUA_COLIBNAME "coroutine" +LUALIB_API int (luaopen_base) (lua_State *L); + +#define LUA_TABLIBNAME "table" +LUALIB_API int (luaopen_table) (lua_State *L); + +#define LUA_IOLIBNAME "io" +LUALIB_API int (luaopen_io) (lua_State *L); + +#define LUA_OSLIBNAME "os" +LUALIB_API int (luaopen_os) (lua_State *L); + +#define LUA_STRLIBNAME "string" +LUALIB_API int (luaopen_string) (lua_State *L); + +#define LUA_MATHLIBNAME "math" +LUALIB_API int (luaopen_math) (lua_State *L); + +#define LUA_DBLIBNAME "debug" +LUALIB_API int (luaopen_debug) (lua_State *L); + +#define LUA_LOADLIBNAME "package" +LUALIB_API int (luaopen_package) (lua_State *L); + + +/* open all previous libraries */ +LUALIB_API void (luaL_openlibs) (lua_State *L); + + + +#ifndef lua_assert +#define lua_assert(x) ((void)0) +#endif + + +#endif diff --git a/deps/lua/src/lundump.c b/deps/lua/src/lundump.c new file mode 100644 index 0000000..8010a45 --- /dev/null +++ b/deps/lua/src/lundump.c @@ -0,0 +1,227 @@ +/* +** $Id: lundump.c,v 2.7.1.4 2008/04/04 19:51:41 roberto Exp $ +** load precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#include + +#define lundump_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstring.h" +#include "lundump.h" +#include "lzio.h" + +typedef struct { + lua_State* L; + ZIO* Z; + Mbuffer* b; + const char* name; +} LoadState; + +#ifdef LUAC_TRUST_BINARIES +#define IF(c,s) +#define error(S,s) +#else +#define IF(c,s) if (c) error(S,s) + +static void 
error(LoadState* S, const char* why) +{ + luaO_pushfstring(S->L,"%s: %s in precompiled chunk",S->name,why); + luaD_throw(S->L,LUA_ERRSYNTAX); +} +#endif + +#define LoadMem(S,b,n,size) LoadBlock(S,b,(n)*(size)) +#define LoadByte(S) (lu_byte)LoadChar(S) +#define LoadVar(S,x) LoadMem(S,&x,1,sizeof(x)) +#define LoadVector(S,b,n,size) LoadMem(S,b,n,size) + +static void LoadBlock(LoadState* S, void* b, size_t size) +{ + size_t r=luaZ_read(S->Z,b,size); + IF (r!=0, "unexpected end"); +} + +static int LoadChar(LoadState* S) +{ + char x; + LoadVar(S,x); + return x; +} + +static int LoadInt(LoadState* S) +{ + int x; + LoadVar(S,x); + IF (x<0, "bad integer"); + return x; +} + +static lua_Number LoadNumber(LoadState* S) +{ + lua_Number x; + LoadVar(S,x); + return x; +} + +static TString* LoadString(LoadState* S) +{ + size_t size; + LoadVar(S,size); + if (size==0) + return NULL; + else + { + char* s=luaZ_openspace(S->L,S->b,size); + LoadBlock(S,s,size); + return luaS_newlstr(S->L,s,size-1); /* remove trailing '\0' */ + } +} + +static void LoadCode(LoadState* S, Proto* f) +{ + int n=LoadInt(S); + f->code=luaM_newvector(S->L,n,Instruction); + f->sizecode=n; + LoadVector(S,f->code,n,sizeof(Instruction)); +} + +static Proto* LoadFunction(LoadState* S, TString* p); + +static void LoadConstants(LoadState* S, Proto* f) +{ + int i,n; + n=LoadInt(S); + f->k=luaM_newvector(S->L,n,TValue); + f->sizek=n; + for (i=0; ik[i]); + for (i=0; ik[i]; + int t=LoadChar(S); + switch (t) + { + case LUA_TNIL: + setnilvalue(o); + break; + case LUA_TBOOLEAN: + setbvalue(o,LoadChar(S)!=0); + break; + case LUA_TNUMBER: + setnvalue(o,LoadNumber(S)); + break; + case LUA_TSTRING: + setsvalue2n(S->L,o,LoadString(S)); + break; + default: + error(S,"bad constant"); + break; + } + } + n=LoadInt(S); + f->p=luaM_newvector(S->L,n,Proto*); + f->sizep=n; + for (i=0; ip[i]=NULL; + for (i=0; ip[i]=LoadFunction(S,f->source); +} + +static void LoadDebug(LoadState* S, Proto* f) +{ + int i,n; + n=LoadInt(S); + f->lineinfo=luaM_newvector(S->L,n,int); + f->sizelineinfo=n; + LoadVector(S,f->lineinfo,n,sizeof(int)); + n=LoadInt(S); + f->locvars=luaM_newvector(S->L,n,LocVar); + f->sizelocvars=n; + for (i=0; ilocvars[i].varname=NULL; + for (i=0; ilocvars[i].varname=LoadString(S); + f->locvars[i].startpc=LoadInt(S); + f->locvars[i].endpc=LoadInt(S); + } + n=LoadInt(S); + f->upvalues=luaM_newvector(S->L,n,TString*); + f->sizeupvalues=n; + for (i=0; iupvalues[i]=NULL; + for (i=0; iupvalues[i]=LoadString(S); +} + +static Proto* LoadFunction(LoadState* S, TString* p) +{ + Proto* f; + if (++S->L->nCcalls > LUAI_MAXCCALLS) error(S,"code too deep"); + f=luaF_newproto(S->L); + setptvalue2s(S->L,S->L->top,f); incr_top(S->L); + f->source=LoadString(S); if (f->source==NULL) f->source=p; + f->linedefined=LoadInt(S); + f->lastlinedefined=LoadInt(S); + f->nups=LoadByte(S); + f->numparams=LoadByte(S); + f->is_vararg=LoadByte(S); + f->maxstacksize=LoadByte(S); + LoadCode(S,f); + LoadConstants(S,f); + LoadDebug(S,f); + IF (!luaG_checkcode(f), "bad code"); + S->L->top--; + S->L->nCcalls--; + return f; +} + +static void LoadHeader(LoadState* S) +{ + char h[LUAC_HEADERSIZE]; + char s[LUAC_HEADERSIZE]; + luaU_header(h); + LoadBlock(S,s,LUAC_HEADERSIZE); + IF (memcmp(h,s,LUAC_HEADERSIZE)!=0, "bad header"); +} + +/* +** load precompiled chunk +*/ +Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name) +{ + LoadState S; + if (*name=='@' || *name=='=') + S.name=name+1; + else if (*name==LUA_SIGNATURE[0]) + S.name="binary string"; + else + S.name=name; + S.L=L; 
+ S.Z=Z; + S.b=buff; + LoadHeader(&S); + return LoadFunction(&S,luaS_newliteral(L,"=?")); +} + +/* +* make header +*/ +void luaU_header (char* h) +{ + int x=1; + memcpy(h,LUA_SIGNATURE,sizeof(LUA_SIGNATURE)-1); + h+=sizeof(LUA_SIGNATURE)-1; + *h++=(char)LUAC_VERSION; + *h++=(char)LUAC_FORMAT; + *h++=(char)*(char*)&x; /* endianness */ + *h++=(char)sizeof(int); + *h++=(char)sizeof(size_t); + *h++=(char)sizeof(Instruction); + *h++=(char)sizeof(lua_Number); + *h++=(char)(((lua_Number)0.5)==0); /* is lua_Number integral? */ +} diff --git a/deps/lua/src/lundump.h b/deps/lua/src/lundump.h new file mode 100644 index 0000000..c80189d --- /dev/null +++ b/deps/lua/src/lundump.h @@ -0,0 +1,36 @@ +/* +** $Id: lundump.h,v 1.37.1.1 2007/12/27 13:02:25 roberto Exp $ +** load precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#ifndef lundump_h +#define lundump_h + +#include "lobject.h" +#include "lzio.h" + +/* load one chunk; from lundump.c */ +LUAI_FUNC Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name); + +/* make header; from lundump.c */ +LUAI_FUNC void luaU_header (char* h); + +/* dump one chunk; from ldump.c */ +LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip); + +#ifdef luac_c +/* print one chunk; from print.c */ +LUAI_FUNC void luaU_print (const Proto* f, int full); +#endif + +/* for header of binary files -- this is Lua 5.1 */ +#define LUAC_VERSION 0x51 + +/* for header of binary files -- this is the official format */ +#define LUAC_FORMAT 0 + +/* size of header of binary files */ +#define LUAC_HEADERSIZE 12 + +#endif diff --git a/deps/lua/src/lvm.c b/deps/lua/src/lvm.c new file mode 100644 index 0000000..5cd313b --- /dev/null +++ b/deps/lua/src/lvm.c @@ -0,0 +1,769 @@ +/* +** $Id: lvm.c,v 2.63.1.5 2011/08/17 20:43:11 roberto Exp $ +** Lua virtual machine +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define lvm_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lvm.h" + + + +/* limit for table tag-method chains (to avoid loops) */ +#define MAXTAGLOOP 100 + + +const TValue *luaV_tonumber (const TValue *obj, TValue *n) { + lua_Number num; + if (ttisnumber(obj)) return obj; + if (ttisstring(obj) && luaO_str2d(svalue(obj), &num)) { + setnvalue(n, num); + return n; + } + else + return NULL; +} + + +int luaV_tostring (lua_State *L, StkId obj) { + if (!ttisnumber(obj)) + return 0; + else { + char s[LUAI_MAXNUMBER2STR]; + lua_Number n = nvalue(obj); + lua_number2str(s, n); + setsvalue2s(L, obj, luaS_new(L, s)); + return 1; + } +} + + +static void traceexec (lua_State *L, const Instruction *pc) { + lu_byte mask = L->hookmask; + const Instruction *oldpc = L->savedpc; + L->savedpc = pc; + if ((mask & LUA_MASKCOUNT) && L->hookcount == 0) { + resethookcount(L); + luaD_callhook(L, LUA_HOOKCOUNT, -1); + } + if (mask & LUA_MASKLINE) { + Proto *p = ci_func(L->ci)->l.p; + int npc = pcRel(pc, p); + int newline = getline(p, npc); + /* call linehook when enter a new function, when jump back (loop), + or when enter a new line */ + if (npc == 0 || pc <= oldpc || newline != getline(p, pcRel(oldpc, p))) + luaD_callhook(L, LUA_HOOKLINE, newline); + } +} + + +static void callTMres (lua_State *L, StkId res, const TValue *f, + const TValue *p1, const TValue *p2) { + ptrdiff_t result = savestack(L, res); + 
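/* 'res' is saved as a relative offset because pushing the arguments and growing the stack below may reallocate the Lua stack */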
setobj2s(L, L->top, f); /* push function */ + setobj2s(L, L->top+1, p1); /* 1st argument */ + setobj2s(L, L->top+2, p2); /* 2nd argument */ + luaD_checkstack(L, 3); + L->top += 3; + luaD_call(L, L->top - 3, 1); + res = restorestack(L, result); + L->top--; + setobjs2s(L, res, L->top); +} + + + +static void callTM (lua_State *L, const TValue *f, const TValue *p1, + const TValue *p2, const TValue *p3) { + setobj2s(L, L->top, f); /* push function */ + setobj2s(L, L->top+1, p1); /* 1st argument */ + setobj2s(L, L->top+2, p2); /* 2nd argument */ + setobj2s(L, L->top+3, p3); /* 3th argument */ + luaD_checkstack(L, 4); + L->top += 4; + luaD_call(L, L->top - 4, 0); +} + + +void luaV_gettable (lua_State *L, const TValue *t, TValue *key, StkId val) { + int loop; + for (loop = 0; loop < MAXTAGLOOP; loop++) { + const TValue *tm; + if (ttistable(t)) { /* `t' is a table? */ + Table *h = hvalue(t); + const TValue *res = luaH_get(h, key); /* do a primitive get */ + if (!ttisnil(res) || /* result is no nil? */ + (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL) { /* or no TM? */ + setobj2s(L, val, res); + return; + } + /* else will try the tag method */ + } + else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX))) + luaG_typeerror(L, t, "index"); + if (ttisfunction(tm)) { + callTMres(L, val, tm, t, key); + return; + } + t = tm; /* else repeat with `tm' */ + } + luaG_runerror(L, "loop in gettable"); +} + + +void luaV_settable (lua_State *L, const TValue *t, TValue *key, StkId val) { + int loop; + TValue temp; + for (loop = 0; loop < MAXTAGLOOP; loop++) { + const TValue *tm; + if (ttistable(t)) { /* `t' is a table? */ + Table *h = hvalue(t); + if (h->readonly) + luaG_runerror(L, "Attempt to modify a readonly table"); + TValue *oldval = luaH_set(L, h, key); /* do a primitive set */ + if (!ttisnil(oldval) || /* result is no nil? */ + (tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL) { /* or no TM? */ + setobj2t(L, oldval, val); + h->flags = 0; + luaC_barriert(L, h, val); + return; + } + /* else will try the tag method */ + } + else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX))) + luaG_typeerror(L, t, "index"); + if (ttisfunction(tm)) { + callTM(L, tm, t, key, val); + return; + } + /* else repeat with `tm' */ + setobj(L, &temp, tm); /* avoid pointing inside table (may rehash) */ + t = &temp; + } + luaG_runerror(L, "loop in settable"); +} + + +static int call_binTM (lua_State *L, const TValue *p1, const TValue *p2, + StkId res, TMS event) { + const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */ + if (ttisnil(tm)) + tm = luaT_gettmbyobj(L, p2, event); /* try second operand */ + if (ttisnil(tm)) return 0; + callTMres(L, res, tm, p1, p2); + return 1; +} + + +static const TValue *get_compTM (lua_State *L, Table *mt1, Table *mt2, + TMS event) { + const TValue *tm1 = fasttm(L, mt1, event); + const TValue *tm2; + if (tm1 == NULL) return NULL; /* no metamethod */ + if (mt1 == mt2) return tm1; /* same metatables => same metamethods */ + tm2 = fasttm(L, mt2, event); + if (tm2 == NULL) return NULL; /* no metamethod */ + if (luaO_rawequalObj(tm1, tm2)) /* same metamethods? */ + return tm1; + return NULL; +} + + +static int call_orderTM (lua_State *L, const TValue *p1, const TValue *p2, + TMS event) { + const TValue *tm1 = luaT_gettmbyobj(L, p1, event); + const TValue *tm2; + if (ttisnil(tm1)) return -1; /* no metamethod? */ + tm2 = luaT_gettmbyobj(L, p2, event); + if (!luaO_rawequalObj(tm1, tm2)) /* different metamethods? 
*/ + return -1; + callTMres(L, L->top, tm1, p1, p2); + return !l_isfalse(L->top); +} + + +static int l_strcmp (const TString *ls, const TString *rs) { + const char *l = getstr(ls); + size_t ll = ls->tsv.len; + const char *r = getstr(rs); + size_t lr = rs->tsv.len; + for (;;) { + int temp = strcoll(l, r); + if (temp != 0) return temp; + else { /* strings are equal up to a `\0' */ + size_t len = strlen(l); /* index of first `\0' in both strings */ + if (len == lr) /* r is finished? */ + return (len == ll) ? 0 : 1; + else if (len == ll) /* l is finished? */ + return -1; /* l is smaller than r (because r is not finished) */ + /* both strings longer than `len'; go on comparing (after the `\0') */ + len++; + l += len; ll -= len; r += len; lr -= len; + } + } +} + + +int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) { + int res; + if (ttype(l) != ttype(r)) + return luaG_ordererror(L, l, r); + else if (ttisnumber(l)) + return luai_numlt(nvalue(l), nvalue(r)); + else if (ttisstring(l)) + return l_strcmp(rawtsvalue(l), rawtsvalue(r)) < 0; + else if ((res = call_orderTM(L, l, r, TM_LT)) != -1) + return res; + return luaG_ordererror(L, l, r); +} + + +static int lessequal (lua_State *L, const TValue *l, const TValue *r) { + int res; + if (ttype(l) != ttype(r)) + return luaG_ordererror(L, l, r); + else if (ttisnumber(l)) + return luai_numle(nvalue(l), nvalue(r)); + else if (ttisstring(l)) + return l_strcmp(rawtsvalue(l), rawtsvalue(r)) <= 0; + else if ((res = call_orderTM(L, l, r, TM_LE)) != -1) /* first try `le' */ + return res; + else if ((res = call_orderTM(L, r, l, TM_LT)) != -1) /* else try `lt' */ + return !res; + return luaG_ordererror(L, l, r); +} + + +int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2) { + const TValue *tm; + lua_assert(ttype(t1) == ttype(t2)); + switch (ttype(t1)) { + case LUA_TNIL: return 1; + case LUA_TNUMBER: return luai_numeq(nvalue(t1), nvalue(t2)); + case LUA_TBOOLEAN: return bvalue(t1) == bvalue(t2); /* true must be 1 !! */ + case LUA_TLIGHTUSERDATA: return pvalue(t1) == pvalue(t2); + case LUA_TUSERDATA: { + if (uvalue(t1) == uvalue(t2)) return 1; + tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable, + TM_EQ); + break; /* will try TM */ + } + case LUA_TTABLE: { + if (hvalue(t1) == hvalue(t2)) return 1; + tm = get_compTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ); + break; /* will try TM */ + } + default: return gcvalue(t1) == gcvalue(t2); + } + if (tm == NULL) return 0; /* no TM? */ + callTMres(L, L->top, tm, t1, t2); /* call TM */ + return !l_isfalse(L->top); +} + + +void luaV_concat (lua_State *L, int total, int last) { + do { + StkId top = L->base + last + 1; + int n = 2; /* number of elements handled in this pass (at least 2) */ + if (!(ttisstring(top-2) || ttisnumber(top-2)) || !tostring(L, top-1)) { + if (!call_binTM(L, top-2, top-1, top-2, TM_CONCAT)) + luaG_concaterror(L, top-2, top-1); + } else if (tsvalue(top-1)->len == 0) /* second op is empty? 
*/ + (void)tostring(L, top - 2); /* result is first op (as string) */ + else { + /* at least two string values; get as many as possible */ + size_t tl = tsvalue(top-1)->len; + char *buffer; + int i; + /* collect total length */ + for (n = 1; n < total && tostring(L, top-n-1); n++) { + size_t l = tsvalue(top-n-1)->len; + if (l >= MAX_SIZET - tl) luaG_runerror(L, "string length overflow"); + tl += l; + } + buffer = luaZ_openspace(L, &G(L)->buff, tl); + tl = 0; + for (i=n; i>0; i--) { /* concat all strings */ + size_t l = tsvalue(top-i)->len; + memcpy(buffer+tl, svalue(top-i), l); + tl += l; + } + setsvalue2s(L, top-n, luaS_newlstr(L, buffer, tl)); + } + total -= n-1; /* got `n' strings to create 1 new */ + last -= n-1; + } while (total > 1); /* repeat until only 1 result left */ +} + + +static void Arith (lua_State *L, StkId ra, const TValue *rb, + const TValue *rc, TMS op) { + TValue tempb, tempc; + const TValue *b, *c; + if ((b = luaV_tonumber(rb, &tempb)) != NULL && + (c = luaV_tonumber(rc, &tempc)) != NULL) { + lua_Number nb = nvalue(b), nc = nvalue(c); + switch (op) { + case TM_ADD: setnvalue(ra, luai_numadd(nb, nc)); break; + case TM_SUB: setnvalue(ra, luai_numsub(nb, nc)); break; + case TM_MUL: setnvalue(ra, luai_nummul(nb, nc)); break; + case TM_DIV: setnvalue(ra, luai_numdiv(nb, nc)); break; + case TM_MOD: setnvalue(ra, luai_nummod(nb, nc)); break; + case TM_POW: setnvalue(ra, luai_numpow(nb, nc)); break; + case TM_UNM: setnvalue(ra, luai_numunm(nb)); break; + default: lua_assert(0); break; + } + } + else if (!call_binTM(L, rb, rc, ra, op)) + luaG_aritherror(L, rb, rc); +} + + + +/* +** some macros for common tasks in `luaV_execute' +*/ + +#define runtime_check(L, c) { if (!(c)) break; } + +#define RA(i) (base+GETARG_A(i)) +/* to be used after possible stack reallocation */ +#define RB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgR, base+GETARG_B(i)) +#define RC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgR, base+GETARG_C(i)) +#define RKB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, \ + ISK(GETARG_B(i)) ? k+INDEXK(GETARG_B(i)) : base+GETARG_B(i)) +#define RKC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgK, \ + ISK(GETARG_C(i)) ? k+INDEXK(GETARG_C(i)) : base+GETARG_C(i)) +#define KBx(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, k+GETARG_Bx(i)) + + +#define dojump(L,pc,i) {(pc) += (i); luai_threadyield(L);} + + +#define Protect(x) { L->savedpc = pc; {x;}; base = L->base; } + + +#define arith_op(op,tm) { \ + TValue *rb = RKB(i); \ + TValue *rc = RKC(i); \ + if (ttisnumber(rb) && ttisnumber(rc)) { \ + lua_Number nb = nvalue(rb), nc = nvalue(rc); \ + setnvalue(ra, op(nb, nc)); \ + } \ + else \ + Protect(Arith(L, ra, rb, rc, tm)); \ + } + + + +void luaV_execute (lua_State *L, int nexeccalls) { + LClosure *cl; + StkId base; + TValue *k; + const Instruction *pc; + reentry: /* entry point */ + lua_assert(isLua(L->ci)); + pc = L->savedpc; + cl = &clvalue(L->ci->func)->l; + base = L->base; + k = cl->p->k; + /* main loop of interpreter */ + for (;;) { + const Instruction i = *pc++; + StkId ra; + if ((L->hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)) && + (--L->hookcount == 0 || L->hookmask & LUA_MASKLINE)) { + traceexec(L, pc); + if (L->status == LUA_YIELD) { /* did hook yield? */ + L->savedpc = pc - 1; + return; + } + base = L->base; + } + /* warning!! 
several calls may realloc the stack and invalidate `ra' */ + ra = RA(i); + lua_assert(base == L->base && L->base == L->ci->base); + lua_assert(base <= L->top && L->top <= L->stack + L->stacksize); + lua_assert(L->top == L->ci->top || luaG_checkopenop(i)); + switch (GET_OPCODE(i)) { + case OP_MOVE: { + setobjs2s(L, ra, RB(i)); + continue; + } + case OP_LOADK: { + setobj2s(L, ra, KBx(i)); + continue; + } + case OP_LOADBOOL: { + setbvalue(ra, GETARG_B(i)); + if (GETARG_C(i)) pc++; /* skip next instruction (if C) */ + continue; + } + case OP_LOADNIL: { + TValue *rb = RB(i); + do { + setnilvalue(rb--); + } while (rb >= ra); + continue; + } + case OP_GETUPVAL: { + int b = GETARG_B(i); + setobj2s(L, ra, cl->upvals[b]->v); + continue; + } + case OP_GETGLOBAL: { + TValue g; + TValue *rb = KBx(i); + sethvalue(L, &g, cl->env); + lua_assert(ttisstring(rb)); + Protect(luaV_gettable(L, &g, rb, ra)); + continue; + } + case OP_GETTABLE: { + Protect(luaV_gettable(L, RB(i), RKC(i), ra)); + continue; + } + case OP_SETGLOBAL: { + TValue g; + sethvalue(L, &g, cl->env); + lua_assert(ttisstring(KBx(i))); + Protect(luaV_settable(L, &g, KBx(i), ra)); + continue; + } + case OP_SETUPVAL: { + UpVal *uv = cl->upvals[GETARG_B(i)]; + setobj(L, uv->v, ra); + luaC_barrier(L, uv, ra); + continue; + } + case OP_SETTABLE: { + Protect(luaV_settable(L, ra, RKB(i), RKC(i))); + continue; + } + case OP_NEWTABLE: { + int b = GETARG_B(i); + int c = GETARG_C(i); + sethvalue(L, ra, luaH_new(L, luaO_fb2int(b), luaO_fb2int(c))); + Protect(luaC_checkGC(L)); + continue; + } + case OP_SELF: { + StkId rb = RB(i); + setobjs2s(L, ra+1, rb); + Protect(luaV_gettable(L, rb, RKC(i), ra)); + continue; + } + case OP_ADD: { + arith_op(luai_numadd, TM_ADD); + continue; + } + case OP_SUB: { + arith_op(luai_numsub, TM_SUB); + continue; + } + case OP_MUL: { + arith_op(luai_nummul, TM_MUL); + continue; + } + case OP_DIV: { + arith_op(luai_numdiv, TM_DIV); + continue; + } + case OP_MOD: { + arith_op(luai_nummod, TM_MOD); + continue; + } + case OP_POW: { + arith_op(luai_numpow, TM_POW); + continue; + } + case OP_UNM: { + TValue *rb = RB(i); + if (ttisnumber(rb)) { + lua_Number nb = nvalue(rb); + setnvalue(ra, luai_numunm(nb)); + } + else { + Protect(Arith(L, ra, rb, rb, TM_UNM)); + } + continue; + } + case OP_NOT: { + int res = l_isfalse(RB(i)); /* next assignment may change this value */ + setbvalue(ra, res); + continue; + } + case OP_LEN: { + const TValue *rb = RB(i); + switch (ttype(rb)) { + case LUA_TTABLE: { + setnvalue(ra, cast_num(luaH_getn(hvalue(rb)))); + break; + } + case LUA_TSTRING: { + setnvalue(ra, cast_num(tsvalue(rb)->len)); + break; + } + default: { /* try metamethod */ + Protect( + if (!call_binTM(L, rb, luaO_nilobject, ra, TM_LEN)) + luaG_typeerror(L, rb, "get length of"); + ) + } + } + continue; + } + case OP_CONCAT: { + int b = GETARG_B(i); + int c = GETARG_C(i); + Protect(luaV_concat(L, c-b+1, c); luaC_checkGC(L)); + setobjs2s(L, RA(i), base+b); + continue; + } + case OP_JMP: { + dojump(L, pc, GETARG_sBx(i)); + continue; + } + case OP_EQ: { + TValue *rb = RKB(i); + TValue *rc = RKC(i); + Protect( + if (equalobj(L, rb, rc) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_LT: { + Protect( + if (luaV_lessthan(L, RKB(i), RKC(i)) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_LE: { + Protect( + if (lessequal(L, RKB(i), RKC(i)) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_TEST: { + if (l_isfalse(ra) != GETARG_C(i)) + 
dojump(L, pc, GETARG_sBx(*pc)); + pc++; + continue; + } + case OP_TESTSET: { + TValue *rb = RB(i); + if (l_isfalse(rb) != GETARG_C(i)) { + setobjs2s(L, ra, rb); + dojump(L, pc, GETARG_sBx(*pc)); + } + pc++; + continue; + } + case OP_CALL: { + int b = GETARG_B(i); + int nresults = GETARG_C(i) - 1; + if (b != 0) L->top = ra+b; /* else previous instruction set top */ + L->savedpc = pc; + switch (luaD_precall(L, ra, nresults)) { + case PCRLUA: { + nexeccalls++; + goto reentry; /* restart luaV_execute over new Lua function */ + } + case PCRC: { + /* it was a C function (`precall' called it); adjust results */ + if (nresults >= 0) L->top = L->ci->top; + base = L->base; + continue; + } + default: { + return; /* yield */ + } + } + } + case OP_TAILCALL: { + int b = GETARG_B(i); + if (b != 0) L->top = ra+b; /* else previous instruction set top */ + L->savedpc = pc; + lua_assert(GETARG_C(i) - 1 == LUA_MULTRET); + switch (luaD_precall(L, ra, LUA_MULTRET)) { + case PCRLUA: { + /* tail call: put new frame in place of previous one */ + CallInfo *ci = L->ci - 1; /* previous frame */ + int aux; + StkId func = ci->func; + StkId pfunc = (ci+1)->func; /* previous function index */ + if (L->openupval) luaF_close(L, ci->base); + L->base = ci->base = ci->func + ((ci+1)->base - pfunc); + for (aux = 0; pfunc+aux < L->top; aux++) /* move frame down */ + setobjs2s(L, func+aux, pfunc+aux); + ci->top = L->top = func+aux; /* correct top */ + lua_assert(L->top == L->base + clvalue(func)->l.p->maxstacksize); + ci->savedpc = L->savedpc; + ci->tailcalls++; /* one more call lost */ + L->ci--; /* remove new frame */ + goto reentry; + } + case PCRC: { /* it was a C function (`precall' called it) */ + base = L->base; + continue; + } + default: { + return; /* yield */ + } + } + } + case OP_RETURN: { + int b = GETARG_B(i); + if (b != 0) L->top = ra+b-1; + if (L->openupval) luaF_close(L, base); + L->savedpc = pc; + b = luaD_poscall(L, ra); + if (--nexeccalls == 0) /* was previous function running `here'? */ + return; /* no: return */ + else { /* yes: continue its execution */ + if (b) L->top = L->ci->top; + lua_assert(isLua(L->ci)); + lua_assert(GET_OPCODE(*((L->ci)->savedpc - 1)) == OP_CALL); + goto reentry; + } + } + case OP_FORLOOP: { + lua_Number step = nvalue(ra+2); + lua_Number idx = luai_numadd(nvalue(ra), step); /* increment index */ + lua_Number limit = nvalue(ra+1); + if (luai_numlt(0, step) ? luai_numle(idx, limit) + : luai_numle(limit, idx)) { + dojump(L, pc, GETARG_sBx(i)); /* jump back */ + setnvalue(ra, idx); /* update internal index... */ + setnvalue(ra+3, idx); /* ...and external index */ + } + continue; + } + case OP_FORPREP: { + const TValue *init = ra; + const TValue *plimit = ra+1; + const TValue *pstep = ra+2; + L->savedpc = pc; /* next steps may throw errors */ + if (!tonumber(init, ra)) + luaG_runerror(L, LUA_QL("for") " initial value must be a number"); + else if (!tonumber(plimit, ra+1)) + luaG_runerror(L, LUA_QL("for") " limit must be a number"); + else if (!tonumber(pstep, ra+2)) + luaG_runerror(L, LUA_QL("for") " step must be a number"); + setnvalue(ra, luai_numsub(nvalue(ra), nvalue(pstep))); + dojump(L, pc, GETARG_sBx(i)); + continue; + } + case OP_TFORLOOP: { + StkId cb = ra + 3; /* call base */ + setobjs2s(L, cb+2, ra+2); + setobjs2s(L, cb+1, ra+1); + setobjs2s(L, cb, ra); + L->top = cb+3; /* func. + 2 args (state and index) */ + Protect(luaD_call(L, cb, GETARG_C(i))); + L->top = L->ci->top; + cb = RA(i) + 3; /* previous call may change the stack */ + if (!ttisnil(cb)) { /* continue loop? 
*/ + setobjs2s(L, cb-1, cb); /* save control variable */ + dojump(L, pc, GETARG_sBx(*pc)); /* jump back */ + } + pc++; + continue; + } + case OP_SETLIST: { + int n = GETARG_B(i); + int c = GETARG_C(i); + int last; + Table *h; + if (n == 0) { + n = cast_int(L->top - ra) - 1; + L->top = L->ci->top; + } + if (c == 0) c = cast_int(*pc++); + runtime_check(L, ttistable(ra)); + h = hvalue(ra); + last = ((c-1)*LFIELDS_PER_FLUSH) + n; + if (last > h->sizearray) /* needs more space? */ + luaH_resizearray(L, h, last); /* pre-alloc it at once */ + for (; n > 0; n--) { + TValue *val = ra+n; + setobj2t(L, luaH_setnum(L, h, last--), val); + luaC_barriert(L, h, val); + } + continue; + } + case OP_CLOSE: { + luaF_close(L, ra); + continue; + } + case OP_CLOSURE: { + Proto *p; + Closure *ncl; + int nup, j; + p = cl->p->p[GETARG_Bx(i)]; + nup = p->nups; + ncl = luaF_newLclosure(L, nup, cl->env); + ncl->l.p = p; + for (j=0; jl.upvals[j] = cl->upvals[GETARG_B(*pc)]; + else { + lua_assert(GET_OPCODE(*pc) == OP_MOVE); + ncl->l.upvals[j] = luaF_findupval(L, base + GETARG_B(*pc)); + } + } + setclvalue(L, ra, ncl); + Protect(luaC_checkGC(L)); + continue; + } + case OP_VARARG: { + int b = GETARG_B(i) - 1; + int j; + CallInfo *ci = L->ci; + int n = cast_int(ci->base - ci->func) - cl->p->numparams - 1; + if (b == LUA_MULTRET) { + Protect(luaD_checkstack(L, n)); + ra = RA(i); /* previous call may change the stack */ + b = n; + L->top = ra + n; + } + for (j = 0; j < b; j++) { + if (j < n) { + setobjs2s(L, ra + j, ci->base - n + j); + } + else { + setnilvalue(ra + j); + } + } + continue; + } + } + } +} + diff --git a/deps/lua/src/lvm.h b/deps/lua/src/lvm.h new file mode 100644 index 0000000..bfe4f56 --- /dev/null +++ b/deps/lua/src/lvm.h @@ -0,0 +1,36 @@ +/* +** $Id: lvm.h,v 2.5.1.1 2007/12/27 13:02:25 roberto Exp $ +** Lua virtual machine +** See Copyright Notice in lua.h +*/ + +#ifndef lvm_h +#define lvm_h + + +#include "ldo.h" +#include "lobject.h" +#include "ltm.h" + + +#define tostring(L,o) ((ttype(o) == LUA_TSTRING) || (luaV_tostring(L, o))) + +#define tonumber(o,n) (ttype(o) == LUA_TNUMBER || \ + (((o) = luaV_tonumber(o,n)) != NULL)) + +#define equalobj(L,o1,o2) \ + (ttype(o1) == ttype(o2) && luaV_equalval(L, o1, o2)) + + +LUAI_FUNC int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r); +LUAI_FUNC int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2); +LUAI_FUNC const TValue *luaV_tonumber (const TValue *obj, TValue *n); +LUAI_FUNC int luaV_tostring (lua_State *L, StkId obj); +LUAI_FUNC void luaV_gettable (lua_State *L, const TValue *t, TValue *key, + StkId val); +LUAI_FUNC void luaV_settable (lua_State *L, const TValue *t, TValue *key, + StkId val); +LUAI_FUNC void luaV_execute (lua_State *L, int nexeccalls); +LUAI_FUNC void luaV_concat (lua_State *L, int total, int last); + +#endif diff --git a/deps/lua/src/lzio.c b/deps/lua/src/lzio.c new file mode 100644 index 0000000..293edd5 --- /dev/null +++ b/deps/lua/src/lzio.c @@ -0,0 +1,82 @@ +/* +** $Id: lzio.c,v 1.31.1.1 2007/12/27 13:02:25 roberto Exp $ +** a generic input stream interface +** See Copyright Notice in lua.h +*/ + + +#include + +#define lzio_c +#define LUA_CORE + +#include "lua.h" + +#include "llimits.h" +#include "lmem.h" +#include "lstate.h" +#include "lzio.h" + + +int luaZ_fill (ZIO *z) { + size_t size; + lua_State *L = z->L; + const char *buff; + lua_unlock(L); + buff = z->reader(L, z->data, &size); + lua_lock(L); + if (buff == NULL || size == 0) return EOZ; + z->n = size - 1; + z->p = buff; + return char2int(*(z->p++)); 
+} + + +int luaZ_lookahead (ZIO *z) { + if (z->n == 0) { + if (luaZ_fill(z) == EOZ) + return EOZ; + else { + z->n++; /* luaZ_fill removed first byte; put back it */ + z->p--; + } + } + return char2int(*z->p); +} + + +void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, void *data) { + z->L = L; + z->reader = reader; + z->data = data; + z->n = 0; + z->p = NULL; +} + + +/* --------------------------------------------------------------- read --- */ +size_t luaZ_read (ZIO *z, void *b, size_t n) { + while (n) { + size_t m; + if (luaZ_lookahead(z) == EOZ) + return n; /* return number of missing bytes */ + m = (n <= z->n) ? n : z->n; /* min. between n and z->n */ + memcpy(b, z->p, m); + z->n -= m; + z->p += m; + b = (char *)b + m; + n -= m; + } + return 0; +} + +/* ------------------------------------------------------------------------ */ +char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n) { + if (n > buff->buffsize) { + if (n < LUA_MINBUFFER) n = LUA_MINBUFFER; + luaZ_resizebuffer(L, buff, n); + } + return buff->buffer; +} + + diff --git a/deps/lua/src/lzio.h b/deps/lua/src/lzio.h new file mode 100644 index 0000000..51d695d --- /dev/null +++ b/deps/lua/src/lzio.h @@ -0,0 +1,67 @@ +/* +** $Id: lzio.h,v 1.21.1.1 2007/12/27 13:02:25 roberto Exp $ +** Buffered streams +** See Copyright Notice in lua.h +*/ + + +#ifndef lzio_h +#define lzio_h + +#include "lua.h" + +#include "lmem.h" + + +#define EOZ (-1) /* end of stream */ + +typedef struct Zio ZIO; + +#define char2int(c) cast(int, cast(unsigned char, (c))) + +#define zgetc(z) (((z)->n--)>0 ? char2int(*(z)->p++) : luaZ_fill(z)) + +typedef struct Mbuffer { + char *buffer; + size_t n; + size_t buffsize; +} Mbuffer; + +#define luaZ_initbuffer(L, buff) ((buff)->buffer = NULL, (buff)->buffsize = 0) + +#define luaZ_buffer(buff) ((buff)->buffer) +#define luaZ_sizebuffer(buff) ((buff)->buffsize) +#define luaZ_bufflen(buff) ((buff)->n) + +#define luaZ_resetbuffer(buff) ((buff)->n = 0) + + +#define luaZ_resizebuffer(L, buff, size) \ + (luaM_reallocvector(L, (buff)->buffer, (buff)->buffsize, size, char), \ + (buff)->buffsize = size) + +#define luaZ_freebuffer(L, buff) luaZ_resizebuffer(L, buff, 0) + + +LUAI_FUNC char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n); +LUAI_FUNC void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, + void *data); +LUAI_FUNC size_t luaZ_read (ZIO* z, void* b, size_t n); /* read next n bytes */ +LUAI_FUNC int luaZ_lookahead (ZIO *z); + + + +/* --------- Private Part ------------------ */ + +struct Zio { + size_t n; /* bytes still unread */ + const char *p; /* current position in buffer */ + lua_Reader reader; + void* data; /* additional data */ + lua_State *L; /* Lua state (for reader) */ +}; + + +LUAI_FUNC int luaZ_fill (ZIO *z); + +#endif diff --git a/deps/lua/src/print.c b/deps/lua/src/print.c new file mode 100644 index 0000000..e240cfc --- /dev/null +++ b/deps/lua/src/print.c @@ -0,0 +1,227 @@ +/* +** $Id: print.c,v 1.55a 2006/05/31 13:30:05 lhf Exp $ +** print bytecodes +** See Copyright Notice in lua.h +*/ + +#include +#include + +#define luac_c +#define LUA_CORE + +#include "ldebug.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lundump.h" + +#define PrintFunction luaU_print + +#define Sizeof(x) ((int)sizeof(x)) +#define VOID(p) ((const void*)(p)) + +static void PrintString(const TString* ts) +{ + const char* s=getstr(ts); + size_t i,n=ts->tsv.len; + putchar('"'); + for (i=0; ik[i]; + switch (ttype(o)) + { + case LUA_TNIL: + printf("nil"); + break; + case LUA_TBOOLEAN: + printf(bvalue(o) ? 
"true" : "false"); + break; + case LUA_TNUMBER: + printf(LUA_NUMBER_FMT,nvalue(o)); + break; + case LUA_TSTRING: + PrintString(rawtsvalue(o)); + break; + default: /* cannot happen */ + printf("? type=%d",ttype(o)); + break; + } +} + +static void PrintCode(const Proto* f) +{ + const Instruction* code=f->code; + int pc,n=f->sizecode; + for (pc=0; pc0) printf("[%d]\t",line); else printf("[-]\t"); + printf("%-9s\t",luaP_opnames[o]); + switch (getOpMode(o)) + { + case iABC: + printf("%d",a); + if (getBMode(o)!=OpArgN) printf(" %d",ISK(b) ? (-1-INDEXK(b)) : b); + if (getCMode(o)!=OpArgN) printf(" %d",ISK(c) ? (-1-INDEXK(c)) : c); + break; + case iABx: + if (getBMode(o)==OpArgK) printf("%d %d",a,-1-bx); else printf("%d %d",a,bx); + break; + case iAsBx: + if (o==OP_JMP) printf("%d",sbx); else printf("%d %d",a,sbx); + break; + } + switch (o) + { + case OP_LOADK: + printf("\t; "); PrintConstant(f,bx); + break; + case OP_GETUPVAL: + case OP_SETUPVAL: + printf("\t; %s", (f->sizeupvalues>0) ? getstr(f->upvalues[b]) : "-"); + break; + case OP_GETGLOBAL: + case OP_SETGLOBAL: + printf("\t; %s",svalue(&f->k[bx])); + break; + case OP_GETTABLE: + case OP_SELF: + if (ISK(c)) { printf("\t; "); PrintConstant(f,INDEXK(c)); } + break; + case OP_SETTABLE: + case OP_ADD: + case OP_SUB: + case OP_MUL: + case OP_DIV: + case OP_POW: + case OP_EQ: + case OP_LT: + case OP_LE: + if (ISK(b) || ISK(c)) + { + printf("\t; "); + if (ISK(b)) PrintConstant(f,INDEXK(b)); else printf("-"); + printf(" "); + if (ISK(c)) PrintConstant(f,INDEXK(c)); else printf("-"); + } + break; + case OP_JMP: + case OP_FORLOOP: + case OP_FORPREP: + printf("\t; to %d",sbx+pc+2); + break; + case OP_CLOSURE: + printf("\t; %p",VOID(f->p[bx])); + break; + case OP_SETLIST: + if (c==0) printf("\t; %d",(int)code[++pc]); + else printf("\t; %d",c); + break; + default: + break; + } + printf("\n"); + } +} + +#define SS(x) (x==1)?"":"s" +#define S(x) x,SS(x) + +static void PrintHeader(const Proto* f) +{ + const char* s=getstr(f->source); + if (*s=='@' || *s=='=') + s++; + else if (*s==LUA_SIGNATURE[0]) + s="(bstring)"; + else + s="(string)"; + printf("\n%s <%s:%d,%d> (%d instruction%s, %d bytes at %p)\n", + (f->linedefined==0)?"main":"function",s, + f->linedefined,f->lastlinedefined, + S(f->sizecode),f->sizecode*Sizeof(Instruction),VOID(f)); + printf("%d%s param%s, %d slot%s, %d upvalue%s, ", + f->numparams,f->is_vararg?"+":"",SS(f->numparams), + S(f->maxstacksize),S(f->nups)); + printf("%d local%s, %d constant%s, %d function%s\n", + S(f->sizelocvars),S(f->sizek),S(f->sizep)); +} + +static void PrintConstants(const Proto* f) +{ + int i,n=f->sizek; + printf("constants (%d) for %p:\n",n,VOID(f)); + for (i=0; isizelocvars; + printf("locals (%d) for %p:\n",n,VOID(f)); + for (i=0; ilocvars[i].varname),f->locvars[i].startpc+1,f->locvars[i].endpc+1); + } +} + +static void PrintUpvalues(const Proto* f) +{ + int i,n=f->sizeupvalues; + printf("upvalues (%d) for %p:\n",n,VOID(f)); + if (f->upvalues==NULL) return; + for (i=0; iupvalues[i])); + } +} + +void PrintFunction(const Proto* f, int full) +{ + int i,n=f->sizep; + PrintHeader(f); + PrintCode(f); + if (full) + { + PrintConstants(f); + PrintLocals(f); + PrintUpvalues(f); + } + for (i=0; ip[i],full); +} diff --git a/deps/lua/src/strbuf.c b/deps/lua/src/strbuf.c new file mode 100644 index 0000000..775e8ba --- /dev/null +++ b/deps/lua/src/strbuf.c @@ -0,0 +1,198 @@ +/* strbuf - String buffer routines + * + * Copyright (c) 2010-2012 Mark Pulford + * + * Permission is hereby granted, free of charge, to any person obtaining 
+ * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "strbuf.h" + +static void die(const char *fmt, ...) +{ + va_list arg; + + va_start(arg, fmt); + vfprintf(stderr, fmt, arg); + va_end(arg); + fprintf(stderr, "\n"); + + abort(); +} + +void strbuf_init(strbuf_t *s, size_t len) +{ + size_t size; + + if (!len) + size = STRBUF_DEFAULT_SIZE; + else + size = len + 1; + if (size < len) + die("Overflow, len: %zu", len); + s->buf = NULL; + s->size = size; + s->length = 0; + s->dynamic = 0; + s->reallocs = 0; + s->debug = 0; + + s->buf = malloc(size); + if (!s->buf) + die("Out of memory"); + + strbuf_ensure_null(s); +} + +strbuf_t *strbuf_new(size_t len) +{ + strbuf_t *s; + + s = malloc(sizeof(strbuf_t)); + if (!s) + die("Out of memory"); + + strbuf_init(s, len); + + /* Dynamic strbuf allocation / deallocation */ + s->dynamic = 1; + + return s; +} + +static inline void debug_stats(strbuf_t *s) +{ + if (s->debug) { + fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %zd, size: %zd\n", + (long)s, s->reallocs, s->length, s->size); + } +} + +/* If strbuf_t has not been dynamically allocated, strbuf_free() can + * be called any number of times strbuf_init() */ +void strbuf_free(strbuf_t *s) +{ + debug_stats(s); + + if (s->buf) { + free(s->buf); + s->buf = NULL; + } + if (s->dynamic) + free(s); +} + +char *strbuf_free_to_string(strbuf_t *s, size_t *len) +{ + char *buf; + + debug_stats(s); + + strbuf_ensure_null(s); + + buf = s->buf; + if (len) + *len = s->length; + + if (s->dynamic) + free(s); + + return buf; +} + +static size_t calculate_new_size(strbuf_t *s, size_t len) +{ + size_t reqsize, newsize; + + if (len <= 0) + die("BUG: Invalid strbuf length requested"); + + /* Ensure there is room for optional NULL termination */ + reqsize = len + 1; + if (reqsize < len) + die("Overflow, len: %zu", len); + + /* If the user has requested to shrink the buffer, do it exactly */ + if (s->size > reqsize) + return reqsize; + + newsize = s->size; + if (reqsize >= SIZE_MAX / 2) { + newsize = reqsize; + } else { + /* Exponential sizing */ + while (newsize < reqsize) + newsize *= 2; + } + + if (newsize < reqsize) + die("BUG: strbuf length would overflow, len: %zu", len); + + return newsize; +} + + +/* Ensure strbuf can handle a string length bytes long (ignoring NULL + * optional termination). 
*/ +void strbuf_resize(strbuf_t *s, size_t len) +{ + size_t newsize; + + newsize = calculate_new_size(s, len); + + if (s->debug > 1) { + fprintf(stderr, "strbuf(%lx) resize: %zd => %zd\n", + (long)s, s->size, newsize); + } + + s->size = newsize; + s->buf = realloc(s->buf, s->size); + if (!s->buf) + die("Out of memory, len: %zu", len); + s->reallocs++; +} + +void strbuf_append_string(strbuf_t *s, const char *str) +{ + int i; + size_t space; + + space = strbuf_empty_length(s); + + for (i = 0; str[i]; i++) { + if (space < 1) { + strbuf_resize(s, s->length + 1); + space = strbuf_empty_length(s); + } + + s->buf[s->length] = str[i]; + s->length++; + space--; + } +} + + +/* vi:ai et sw=4 ts=4: + */ diff --git a/deps/lua/src/strbuf.h b/deps/lua/src/strbuf.h new file mode 100644 index 0000000..c10f83f --- /dev/null +++ b/deps/lua/src/strbuf.h @@ -0,0 +1,146 @@ +/* strbuf - String buffer routines + * + * Copyright (c) 2010-2012 Mark Pulford + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +/* Size: Total bytes allocated to *buf + * Length: String length, excluding optional NULL terminator. 
+ * Dynamic: True if created via strbuf_new() + */ + +typedef struct { + char *buf; + size_t size; + size_t length; + int dynamic; + int reallocs; + int debug; +} strbuf_t; + +#ifndef STRBUF_DEFAULT_SIZE +#define STRBUF_DEFAULT_SIZE 1023 +#endif + +/* Initialise */ +extern strbuf_t *strbuf_new(size_t len); +extern void strbuf_init(strbuf_t *s, size_t len); + +/* Release */ +extern void strbuf_free(strbuf_t *s); +extern char *strbuf_free_to_string(strbuf_t *s, size_t *len); + +/* Management */ +extern void strbuf_resize(strbuf_t *s, size_t len); +static size_t strbuf_empty_length(strbuf_t *s); +static size_t strbuf_length(strbuf_t *s); +static char *strbuf_string(strbuf_t *s, size_t *len); +static void strbuf_ensure_empty_length(strbuf_t *s, size_t len); +static char *strbuf_empty_ptr(strbuf_t *s); +static void strbuf_extend_length(strbuf_t *s, size_t len); + +/* Update */ +static void strbuf_append_mem(strbuf_t *s, const char *c, size_t len); +extern void strbuf_append_string(strbuf_t *s, const char *str); +static void strbuf_append_char(strbuf_t *s, const char c); +static void strbuf_ensure_null(strbuf_t *s); + +/* Reset string for before use */ +static inline void strbuf_reset(strbuf_t *s) +{ + s->length = 0; +} + +static inline int strbuf_allocated(strbuf_t *s) +{ + return s->buf != NULL; +} + +/* Return bytes remaining in the string buffer + * Ensure there is space for a NULL terminator. */ +static inline size_t strbuf_empty_length(strbuf_t *s) +{ + return s->size - s->length - 1; +} + +static inline void strbuf_ensure_empty_length(strbuf_t *s, size_t len) +{ + if (len > strbuf_empty_length(s)) + strbuf_resize(s, s->length + len); +} + +static inline char *strbuf_empty_ptr(strbuf_t *s) +{ + return s->buf + s->length; +} + +static inline void strbuf_extend_length(strbuf_t *s, size_t len) +{ + s->length += len; +} + +static inline size_t strbuf_length(strbuf_t *s) +{ + return s->length; +} + +static inline void strbuf_append_char(strbuf_t *s, const char c) +{ + strbuf_ensure_empty_length(s, 1); + s->buf[s->length++] = c; +} + +static inline void strbuf_append_char_unsafe(strbuf_t *s, const char c) +{ + s->buf[s->length++] = c; +} + +static inline void strbuf_append_mem(strbuf_t *s, const char *c, size_t len) +{ + strbuf_ensure_empty_length(s, len); + memcpy(s->buf + s->length, c, len); + s->length += len; +} + +static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, size_t len) +{ + memcpy(s->buf + s->length, c, len); + s->length += len; +} + +static inline void strbuf_ensure_null(strbuf_t *s) +{ + s->buf[s->length] = 0; +} + +static inline char *strbuf_string(strbuf_t *s, size_t *len) +{ + if (len) + *len = s->length; + + return s->buf; +} + +/* vi:ai et sw=4 ts=4: + */ diff --git a/deps/lua/test/README b/deps/lua/test/README new file mode 100644 index 0000000..0c7f38b --- /dev/null +++ b/deps/lua/test/README @@ -0,0 +1,26 @@ +These are simple tests for Lua. Some of them contain useful code. +They are meant to be run to make sure Lua is built correctly and also +to be read, to see how Lua programs look. 
+ +Here is a one-line summary of each program: + + bisect.lua bisection method for solving non-linear equations + cf.lua temperature conversion table (celsius to farenheit) + echo.lua echo command line arguments + env.lua environment variables as automatic global variables + factorial.lua factorial without recursion + fib.lua fibonacci function with cache + fibfor.lua fibonacci numbers with coroutines and generators + globals.lua report global variable usage + hello.lua the first program in every language + life.lua Conway's Game of Life + luac.lua bare-bones luac + printf.lua an implementation of printf + readonly.lua make global variables readonly + sieve.lua the sieve of of Eratosthenes programmed with coroutines + sort.lua two implementations of a sort function + table.lua make table, grouping all data for the same item + trace-calls.lua trace calls + trace-globals.lua trace assigments to global variables + xd.lua hex dump + diff --git a/deps/lua/test/bisect.lua b/deps/lua/test/bisect.lua new file mode 100644 index 0000000..f91e69b --- /dev/null +++ b/deps/lua/test/bisect.lua @@ -0,0 +1,27 @@ +-- bisection method for solving non-linear equations + +delta=1e-6 -- tolerance + +function bisect(f,a,b,fa,fb) + local c=(a+b)/2 + io.write(n," c=",c," a=",a," b=",b,"\n") + if c==a or c==b or math.abs(a-b) posted to lua-l +-- modified to use ANSI terminal escape sequences +-- modified to use for instead of while + +local write=io.write + +ALIVE="¥" DEAD="þ" +ALIVE="O" DEAD="-" + +function delay() -- NOTE: SYSTEM-DEPENDENT, adjust as necessary + for i=1,10000 do end + -- local i=os.clock()+1 while(os.clock() 0 do + local xm1,x,xp1,xi=self.w-1,self.w,1,self.w + while xi > 0 do + local sum = self[ym1][xm1] + self[ym1][x] + self[ym1][xp1] + + self[y][xm1] + self[y][xp1] + + self[yp1][xm1] + self[yp1][x] + self[yp1][xp1] + next[y][x] = ((sum==2) and self[y][x]) or ((sum==3) and 1) or 0 + xm1,x,xp1,xi = x,xp1,xp1+1,xi-1 + end + ym1,y,yp1,yi = y,yp1,yp1+1,yi-1 + end +end + +-- output the array to screen +function _CELLS:draw() + local out="" -- accumulate to reduce flicker + for y=1,self.h do + for x=1,self.w do + out=out..(((self[y][x]>0) and ALIVE) or DEAD) + end + out=out.."\n" + end + write(out) +end + +-- constructor +function CELLS(w,h) + local c = ARRAY2D(w,h) + c.spawn = _CELLS.spawn + c.evolve = _CELLS.evolve + c.draw = _CELLS.draw + return c +end + +-- +-- shapes suitable for use with spawn() above +-- +HEART = { 1,0,1,1,0,1,1,1,1; w=3,h=3 } +GLIDER = { 0,0,1,1,0,1,0,1,1; w=3,h=3 } +EXPLODE = { 0,1,0,1,1,1,1,0,1,0,1,0; w=3,h=4 } +FISH = { 0,1,1,1,1,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0; w=5,h=4 } +BUTTERFLY = { 1,0,0,0,1,0,1,1,1,0,1,0,0,0,1,1,0,1,0,1,1,0,0,0,1; w=5,h=5 } + +-- the main routine +function LIFE(w,h) + -- create two arrays + local thisgen = CELLS(w,h) + local nextgen = CELLS(w,h) + + -- create some life + -- about 1000 generations of fun, then a glider steady-state + thisgen:spawn(GLIDER,5,4) + thisgen:spawn(EXPLODE,25,10) + thisgen:spawn(FISH,4,12) + + -- run until break + local gen=1 + write("\027[2J") -- ANSI clear screen + while 1 do + thisgen:evolve(nextgen) + thisgen,nextgen = nextgen,thisgen + write("\027[H") -- ANSI home cursor + thisgen:draw() + write("Life - generation ",gen,"\n") + gen=gen+1 + if gen>2000 then break end + --delay() -- no delay + end +end + +LIFE(40,20) diff --git a/deps/lua/test/luac.lua b/deps/lua/test/luac.lua new file mode 100644 index 0000000..96a0a97 --- /dev/null +++ b/deps/lua/test/luac.lua @@ -0,0 +1,7 @@ +-- bare-bones luac in Lua +-- usage: lua 
luac.lua file.lua + +assert(arg[1]~=nil and arg[2]==nil,"usage: lua luac.lua file.lua") +f=assert(io.open("luac.out","wb")) +assert(f:write(string.dump(assert(loadfile(arg[1]))))) +assert(f:close()) diff --git a/deps/lua/test/printf.lua b/deps/lua/test/printf.lua new file mode 100644 index 0000000..58c63ff --- /dev/null +++ b/deps/lua/test/printf.lua @@ -0,0 +1,7 @@ +-- an implementation of printf + +function printf(...) + io.write(string.format(...)) +end + +printf("Hello %s from %s on %s\n",os.getenv"USER" or "there",_VERSION,os.date()) diff --git a/deps/lua/test/readonly.lua b/deps/lua/test/readonly.lua new file mode 100644 index 0000000..85c0b4e --- /dev/null +++ b/deps/lua/test/readonly.lua @@ -0,0 +1,12 @@ +-- make global variables readonly + +local f=function (t,i) error("cannot redefine global variable `"..i.."'",2) end +local g={} +local G=getfenv() +setmetatable(g,{__index=G,__newindex=f}) +setfenv(1,g) + +-- an example +rawset(g,"x",3) +x=2 +y=1 -- cannot redefine `y' diff --git a/deps/lua/test/sieve.lua b/deps/lua/test/sieve.lua new file mode 100644 index 0000000..0871bb2 --- /dev/null +++ b/deps/lua/test/sieve.lua @@ -0,0 +1,29 @@ +-- the sieve of of Eratosthenes programmed with coroutines +-- typical usage: lua -e N=1000 sieve.lua | column + +-- generate all the numbers from 2 to n +function gen (n) + return coroutine.wrap(function () + for i=2,n do coroutine.yield(i) end + end) +end + +-- filter the numbers generated by `g', removing multiples of `p' +function filter (p, g) + return coroutine.wrap(function () + while 1 do + local n = g() + if n == nil then return end + if math.mod(n, p) ~= 0 then coroutine.yield(n) end + end + end) +end + +N=N or 1000 -- from command line +x = gen(N) -- generate primes up to N +while 1 do + local n = x() -- pick a number until done + if n == nil then break end + print(n) -- must be a prime number + x = filter(n, x) -- now remove its multiples +end diff --git a/deps/lua/test/sort.lua b/deps/lua/test/sort.lua new file mode 100644 index 0000000..0bcb15f --- /dev/null +++ b/deps/lua/test/sort.lua @@ -0,0 +1,66 @@ +-- two implementations of a sort function +-- this is an example only. Lua has now a built-in function "sort" + +-- extracted from Programming Pearls, page 110 +function qsort(x,l,u,f) + if ly end) + show("after reverse selection sort",x) + qsort(x,1,n,function (x,y) return x>> ",string.rep(" ",level)) + if t~=nil and t.currentline>=0 then io.write(t.short_src,":",t.currentline," ") end + t=debug.getinfo(2) + if event=="call" then + level=level+1 + else + level=level-1 if level<0 then level=0 end + end + if t.what=="main" then + if event=="call" then + io.write("begin ",t.short_src) + else + io.write("end ",t.short_src) + end + elseif t.what=="Lua" then +-- table.foreach(t,print) + io.write(event," ",t.name or "(Lua)"," <",t.linedefined,":",t.short_src,">") + else + io.write(event," ",t.name or "(C)"," [",t.what,"] ") + end + io.write("\n") +end + +debug.sethook(hook,"cr") +level=0 diff --git a/deps/lua/test/trace-globals.lua b/deps/lua/test/trace-globals.lua new file mode 100644 index 0000000..295e670 --- /dev/null +++ b/deps/lua/test/trace-globals.lua @@ -0,0 +1,38 @@ +-- trace assigments to global variables + +do + -- a tostring that quotes strings. note the use of the original tostring. 
+ local _tostring=tostring + local tostring=function(a) + if type(a)=="string" then + return string.format("%q",a) + else + return _tostring(a) + end + end + + local log=function (name,old,new) + local t=debug.getinfo(3,"Sl") + local line=t.currentline + io.write(t.short_src) + if line>=0 then io.write(":",line) end + io.write(": ",name," is now ",tostring(new)," (was ",tostring(old),")","\n") + end + + local g={} + local set=function (t,name,value) + log(name,g[name],value) + g[name]=value + end + setmetatable(getfenv(),{__index=g,__newindex=set}) +end + +-- an example + +a=1 +b=2 +a=10 +b=20 +b=nil +b=200 +print(a,b,c) diff --git a/deps/lua/test/xd.lua b/deps/lua/test/xd.lua new file mode 100644 index 0000000..ebc3eff --- /dev/null +++ b/deps/lua/test/xd.lua @@ -0,0 +1,14 @@ +-- hex dump +-- usage: lua xd.lua < file + +local offset=0 +while true do + local s=io.read(16) + if s==nil then return end + io.write(string.format("%08X ",offset)) + string.gsub(s,"(.)", + function (c) io.write(string.format("%02X ",string.byte(c))) end) + io.write(string.rep(" ",3*(16-string.len(s)))) + io.write(" ",string.gsub(s,"%c","."),"\n") + offset=offset+16 +end -- cgit v1.2.3
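
A brief usage sketch for the strbuf string-buffer API imported above in deps/lua/src/strbuf.h (strbuf_new, strbuf_append_string, strbuf_append_char, strbuf_append_mem, strbuf_free_to_string). This sketch is illustration only and is not part of the imported sources: the driver program and the sample text are invented, and only the strbuf_* calls and their signatures are taken from the header shown in this patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include "strbuf.h"

    int main(void)
    {
        size_t len;
        char *msg;

        /* 0 falls back to STRBUF_DEFAULT_SIZE inside strbuf_init() */
        strbuf_t *sb = strbuf_new(0);

        strbuf_append_string(sb, "hello");      /* buffer grows (exponentially) as needed */
        strbuf_append_char(sb, ' ');
        strbuf_append_mem(sb, "world", 5);

        /* frees the dynamically allocated strbuf_t; caller now owns msg */
        msg = strbuf_free_to_string(sb, &len);
        printf("%s (%zu bytes)\n", msg, len);
        free(msg);
        return 0;
    }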